/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
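/*
 * Illustrative walk-through (a sketch, not part of the original comment):
 * suppose bm_write == bm_flush == 5.  A write arriving in add_stripe_bio
 * records sh->bm_seq = 6 (bm_flush+1).  The next unplug advances bm_flush
 * to 6, closing that batch; once bm_flush (6) > bm_write (5), the pending
 * bitmap updates are written out and bm_write becomes 6, after which the
 * queued stripe may safely be written.
 */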

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
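/*
 * Usage sketch (illustrative; it mirrors the loops in ops_run_biofill
 * and ops_complete_biofill below):
 *
 *	struct bio *rbi = dev->toread;
 *	while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
 *		... process rbi ...
 *		rbi = r5_next_bio(rbi, dev->sector);
 *	}
 */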
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
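/*
 * Example (illustrative): bi_phys_segments == (2 << 16) | 3 encodes a
 * (biased) active count of 3 in the low half and a processed count of
 * 2 in the high half; the helpers below extract and update each half.
 */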
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
 */
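/*
 * Example (illustrative, not from the original source): in a 6-device
 * raid6 stripe with pd_idx == 4 and qd_idx == 5, the data disks 0..3
 * map to slots 0..3, the parity disk to slot 4 (syndrome_disks) and
 * the Q disk to slot 5 (syndrome_disks + 1).
 */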
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot;

	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	slot = (*count)++;
	return slot;
}

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}

static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;


	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);

static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock, int noquiesce)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0 || noquiesce,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						     || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru)
				       && !test_bit(STRIPE_EXPANDING, &sh->state));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;
	struct async_submit_ctl submit;
	enum async_tx_flags flags = 0;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;

	if (frombio)
		flags |= ASYNC_TX_FENCE;
	init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
						  b_offset, clen, &submit);
			else
				tx = async_memcpy(bio_page, page, b_offset,
						  page_offset, clen, &submit);
		}
		/* chain the operations */
		submit.depend_tx = tx;

		if (clen < len) /* hit end of page */
			break;
		page_offset +=  len;
	}

	return tx;
}

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
	async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
	struct r5dev *tgt;

	if (target < 0)
		return;

	tgt = &sh->dev[target];
	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* mark the computed target(s) as uptodate */
	mark_target_uptodate(sh, sh->ops.target);
	mark_target_uptodate(sh, sh->ops.target2);

	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
				 struct raid5_percpu *percpu)
{
	return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
			  ops_complete_compute, sh, to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = (void *)raid6_empty_zero_page;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	return count;
}
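/*
 * Example (illustrative): for a 6-device, non-ddf stripe with
 * qd_idx == 5 (so raid6_d0() == 0 and syndrome_disks == 4),
 * set_syndrome_sources() returns count == 4 with srcs[0..3] holding
 * the data pages, srcs[4] the P page and srcs[5] the Q page, matching
 * the slot layout produced by raid6_idx_to_slot().
 */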

static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	struct page **blocks = percpu->scribble;
	int target;
	int qd_idx = sh->qd_idx;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct r5dev *tgt;
	struct page *dest;
	int i;
	int count;

	if (sh->ops.target < 0)
		target = sh->ops.target2;
	else if (sh->ops.target2 < 0)
		target = sh->ops.target;
	else
		/* we should only have one valid target */
		BUG();
	BUG_ON(target < 0);
	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);

	tgt = &sh->dev[target];
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	dest = tgt->page;

	atomic_inc(&sh->count);

	if (target == qd_idx) {
		count = set_syndrome_sources(blocks, sh);
		blocks[count] = NULL; /* regenerating p is not necessary */
		BUG_ON(blocks[count+1] != dest); /* q should already be set */
		init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
				  ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
	} else {
		/* Compute any data- or p-drive using XOR */
		count = 0;
		for (i = disks; i-- ; ) {
			if (i == target || i == qd_idx)
				continue;
			blocks[count++] = sh->dev[i].page;
		}

		init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu));
		tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
	}

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int i, count, disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : disks-2;
	int d0_idx = raid6_d0(sh);
	int faila = -1, failb = -1;
	int target = sh->ops.target;
	int target2 = sh->ops.target2;
	struct r5dev *tgt = &sh->dev[target];
	struct r5dev *tgt2 = &sh->dev[target2];
	struct dma_async_tx_descriptor *tx;
	struct page **blocks = percpu->scribble;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu block1: %d block2: %d\n",
		 __func__, (unsigned long long)sh->sector, target, target2);
	BUG_ON(target < 0 || target2 < 0);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));

	/* we need to open-code set_syndrome_sources to handle the
	 * slot number conversion for 'faila' and 'failb'
	 */
	for (i = 0; i < disks ; i++)
		blocks[i] = (void *)raid6_empty_zero_page;
	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		blocks[slot] = sh->dev[i].page;

		if (i == target)
			faila = slot;
		if (i == target2)
			failb = slot;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);
	BUG_ON(count != syndrome_disks);

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);
	pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
		 __func__, (unsigned long long)sh->sector, faila, failb);

	atomic_inc(&sh->count);

	if (failb == syndrome_disks+1) {
		/* Q disk is one of the missing disks */
		if (faila == syndrome_disks) {
			/* Missing P+Q, just recompute */
			init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		} else {
			struct page *dest;
			int data_target;
			int qd_idx = sh->qd_idx;

			/* Missing D+Q: recompute D from P, then recompute Q */
			if (target == qd_idx)
				data_target = target2;
			else
				data_target = target;

			count = 0;
			for (i = disks; i-- ; ) {
				if (i == data_target || i == qd_idx)
					continue;
				blocks[count++] = sh->dev[i].page;
			}
			dest = sh->dev[data_target].page;
			init_async_submit(&submit,
					  ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
					  NULL, NULL, NULL,
					  to_addr_conv(sh, percpu));
			tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
				       &submit);

			count = set_syndrome_sources(blocks, sh);
			init_async_submit(&submit, ASYNC_TX_FENCE, tx,
					  ops_complete_compute, sh,
					  to_addr_conv(sh, percpu));
			return async_gen_syndrome(blocks, 0, count+2,
						  STRIPE_SIZE, &submit);
		}
	}

	init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute,
			  sh, to_addr_conv(sh, percpu));
	if (failb == syndrome_disks) {
		/* We're missing D+P. */
		return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE,
					       faila, blocks, &submit);
	} else {
		/* We're missing D+D. */
		return async_raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE,
					       faila, failb, blocks, &submit);
	}
}


static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
	       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  ops_complete_prexor, sh, to_addr_conv(sh, percpu));
	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_reconstruct(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->written || i == pd_idx || i == qd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **xor_srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
			  to_addr_conv(sh, percpu));
	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
}

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
		     struct dma_async_tx_descriptor *tx)
{
	struct async_submit_ctl submit;
	struct page **blocks = percpu->scribble;
	int count;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	count = set_syndrome_sources(blocks, sh);

	atomic_inc(&sh->count);

	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
			  sh, to_addr_conv(sh, percpu));
	async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
{
	int disks = sh->disks;
	int pd_idx = sh->pd_idx;
	int qd_idx = sh->qd_idx;
	struct page *xor_dest;
	struct page **xor_srcs = percpu->scribble;
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	int count;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	count = 0;
	xor_dest = sh->dev[pd_idx].page;
	xor_srcs[count++] = xor_dest;
	for (i = disks; i--; ) {
		if (i == pd_idx || i == qd_idx)
			continue;
		xor_srcs[count++] = sh->dev[i].page;
	}

	init_async_submit(&submit, 0, NULL, NULL, NULL,
			  to_addr_conv(sh, percpu));
	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, &submit);

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
	tx = async_trigger_callback(&submit);
}

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
	struct page **srcs = percpu->scribble;
	struct async_submit_ctl submit;
	int count;

	pr_debug("%s: stripe %llu checkp: %d\n", __func__,
		(unsigned long long)sh->sector, checkp);

	count = set_syndrome_sources(srcs, sh);
	if (!checkp)
		srcs[count] = NULL;

	atomic_inc(&sh->count);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu));
	async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
			   &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}

static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int level = conf->level;
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu();
	percpu = per_cpu_ptr(conf->percpu, cpu);
	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else {
			if (sh->ops.target2 < 0 || sh->ops.target < 0)
				tx = ops_run_compute6_1(sh, percpu);
			else
				tx = ops_run_compute6_2(sh, percpu);
		}
		/* terminate the chain if reconstruct is not set to be run */
		if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, percpu, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
		if (level < 6)
			ops_run_reconstruct5(sh, percpu, tx);
		else
			ops_run_reconstruct6(sh, percpu, tx);
	}

	if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
		if (sh->check_state == check_state_run)
			ops_run_check_p(sh, percpu);
		else if (sh->check_state == check_state_run_q)
			ops_run_check_pq(sh, percpu, 0);
		else if (sh->check_state == check_state_run_pq)
			ops_run_check_pq(sh, percpu, 1);
		else
			BUG();
	}

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
	put_cpu();
}
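/*
 * Illustrative flag combination (a sketch, not an exhaustive list): a
 * read-modify-write caller would set STRIPE_OP_PREXOR, STRIPE_OP_BIODRAIN
 * and STRIPE_OP_RECONSTRUCT in ops_request before calling raid_run_ops();
 * the descriptors returned by each ops_run_* helper are chained through
 * 'tx', so prexor, biodrain and reconstruct execute in that order.
 */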

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
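/*
 * Worked example (illustrative, assuming 64-bit pointers and that
 * sizeof(addr_conv_t) equals sizeof(void *)): a 4-disk array needs
 * scribble_len(4) = 8*(4+2) + 8*(4+2) = 96 bytes per CPU.
 */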

static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	unsigned long cpu;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks and the scribble region
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	get_online_cpus();
	conf->scribble_len = scribble_len(newsize);
	for_each_present_cpu(cpu) {
		struct raid5_percpu *percpu;
		void *scribble;

		percpu = per_cpu_ptr(conf->percpu, cpu);
		scribble = kmalloc(conf->scribble_len, GFP_NOIO);

		if (scribble) {
			kfree(percpu->scribble);
			percpu->scribble = scribble;
		} else {
			err = -ENOMEM;
			break;
		}
	}
	put_online_cpus();

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);

		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
1411 /* critical section passed, GFP_NOIO no longer needed */
1412
1413 conf->slab_cache = sc;
1414 conf->active_name = 1-conf->active_name;
1415 conf->pool_size = newsize;
1416 return err;
1417}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418
NeilBrown3f294f42005-11-08 21:39:25 -08001419static int drop_one_stripe(raid5_conf_t *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420{
1421 struct stripe_head *sh;
1422
NeilBrown3f294f42005-11-08 21:39:25 -08001423 spin_lock_irq(&conf->device_lock);
1424 sh = get_free_stripe(conf);
1425 spin_unlock_irq(&conf->device_lock);
1426 if (!sh)
1427 return 0;
Eric Sesterhenn78bafeb2006-04-02 13:31:42 +02001428 BUG_ON(atomic_read(&sh->count));
NeilBrownad01c9e2006-03-27 01:18:07 -08001429 shrink_buffers(sh, conf->pool_size);
NeilBrown3f294f42005-11-08 21:39:25 -08001430 kmem_cache_free(conf->slab_cache, sh);
1431 atomic_dec(&conf->active_stripes);
1432 return 1;
1433}
1434
1435static void shrink_stripes(raid5_conf_t *conf)
1436{
1437 while (drop_one_stripe(conf))
1438 ;
1439
NeilBrown29fc7e32006-02-03 03:03:41 -08001440 if (conf->slab_cache)
1441 kmem_cache_destroy(conf->slab_cache);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 conf->slab_cache = NULL;
1443}
1444
NeilBrown6712ecf2007-09-27 12:47:43 +02001445static void raid5_end_read_request(struct bio * bi, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446{
NeilBrown99c0fb52009-03-31 14:39:38 +11001447 struct stripe_head *sh = bi->bi_private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 raid5_conf_t *conf = sh->raid_conf;
NeilBrown7ecaa1e2006-03-27 01:18:08 -08001449 int disks = sh->disks, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
NeilBrownd6950432006-07-10 04:44:20 -07001451 char b[BDEVNAME_SIZE];
1452 mdk_rdev_t *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455 for (i=0 ; i<disks; i++)
1456 if (bi == &sh->dev[i].req)
1457 break;
1458
Dan Williams45b42332007-07-09 11:56:43 -07001459 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1460 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 uptodate);
1462 if (i == disks) {
1463 BUG();
NeilBrown6712ecf2007-09-27 12:47:43 +02001464 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 }
1466
1467 if (uptodate) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 set_bit(R5_UPTODATE, &sh->dev[i].flags);
NeilBrown4e5314b2005-11-08 21:39:22 -08001469 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
NeilBrownd6950432006-07-10 04:44:20 -07001470 rdev = conf->disks[i].rdev;
Bernd Schubert6be9d492008-05-23 13:04:34 -07001471 printk_rl(KERN_INFO "raid5:%s: read error corrected"
1472 " (%lu sectors at %llu on %s)\n",
1473 mdname(conf->mddev), STRIPE_SECTORS,
1474 (unsigned long long)(sh->sector
1475 + rdev->data_offset),
1476 bdevname(rdev->bdev, b));
NeilBrown4e5314b2005-11-08 21:39:22 -08001477 clear_bit(R5_ReadError, &sh->dev[i].flags);
1478 clear_bit(R5_ReWrite, &sh->dev[i].flags);
1479 }
NeilBrownba22dcb2005-11-08 21:39:31 -08001480 if (atomic_read(&conf->disks[i].rdev->read_errors))
1481 atomic_set(&conf->disks[i].rdev->read_errors, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 } else {
NeilBrownd6950432006-07-10 04:44:20 -07001483 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
NeilBrownba22dcb2005-11-08 21:39:31 -08001484 int retry = 0;
NeilBrownd6950432006-07-10 04:44:20 -07001485 rdev = conf->disks[i].rdev;
1486
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
NeilBrownd6950432006-07-10 04:44:20 -07001488 atomic_inc(&rdev->read_errors);
NeilBrownba22dcb2005-11-08 21:39:31 -08001489 if (conf->mddev->degraded)
Bernd Schubert6be9d492008-05-23 13:04:34 -07001490 printk_rl(KERN_WARNING
1491 "raid5:%s: read error not correctable "
1492 "(sector %llu on %s).\n",
1493 mdname(conf->mddev),
1494 (unsigned long long)(sh->sector
1495 + rdev->data_offset),
1496 bdn);
NeilBrownba22dcb2005-11-08 21:39:31 -08001497 else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
NeilBrown4e5314b2005-11-08 21:39:22 -08001498 /* Oh, no!!! */
Bernd Schubert6be9d492008-05-23 13:04:34 -07001499 printk_rl(KERN_WARNING
1500 "raid5:%s: read error NOT corrected!! "
1501 "(sector %llu on %s).\n",
1502 mdname(conf->mddev),
1503 (unsigned long long)(sh->sector
1504 + rdev->data_offset),
1505 bdn);
NeilBrownd6950432006-07-10 04:44:20 -07001506 else if (atomic_read(&rdev->read_errors)
NeilBrownba22dcb2005-11-08 21:39:31 -08001507 > conf->max_nr_stripes)
NeilBrown14f8d262006-01-06 00:20:14 -08001508 printk(KERN_WARNING
NeilBrownd6950432006-07-10 04:44:20 -07001509 "raid5:%s: Too many read errors, failing device %s.\n",
1510 mdname(conf->mddev), bdn);
NeilBrownba22dcb2005-11-08 21:39:31 -08001511 else
1512 retry = 1;
1513 if (retry)
1514 set_bit(R5_ReadError, &sh->dev[i].flags);
1515 else {
NeilBrown4e5314b2005-11-08 21:39:22 -08001516 clear_bit(R5_ReadError, &sh->dev[i].flags);
1517 clear_bit(R5_ReWrite, &sh->dev[i].flags);
NeilBrownd6950432006-07-10 04:44:20 -07001518 md_error(conf->mddev, rdev);
NeilBrownba22dcb2005-11-08 21:39:31 -08001519 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 }
1521 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1523 set_bit(STRIPE_HANDLE, &sh->state);
1524 release_stripe(sh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525}
1526
NeilBrownd710e132008-10-13 11:55:12 +11001527static void raid5_end_write_request(struct bio *bi, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528{
NeilBrown99c0fb52009-03-31 14:39:38 +11001529 struct stripe_head *sh = bi->bi_private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 raid5_conf_t *conf = sh->raid_conf;
NeilBrown7ecaa1e2006-03-27 01:18:08 -08001531 int disks = sh->disks, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1533
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 for (i=0 ; i<disks; i++)
1535 if (bi == &sh->dev[i].req)
1536 break;
1537
Dan Williams45b42332007-07-09 11:56:43 -07001538 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1540 uptodate);
1541 if (i == disks) {
1542 BUG();
NeilBrown6712ecf2007-09-27 12:47:43 +02001543 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 }
1545
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 if (!uptodate)
1547 md_error(conf->mddev, conf->disks[i].rdev);
1548
1549 rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1550
1551 clear_bit(R5_LOCKED, &sh->dev[i].flags);
1552 set_bit(STRIPE_HANDLE, &sh->state);
NeilBrownc04be0a2006-10-03 01:15:53 -07001553 release_stripe(sh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554}
1555
1556
NeilBrown784052e2009-03-31 15:19:07 +11001557static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
NeilBrown784052e2009-03-31 15:19:07 +11001559static void raid5_build_block(struct stripe_head *sh, int i, int previous)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560{
1561 struct r5dev *dev = &sh->dev[i];
1562
1563 bio_init(&dev->req);
1564 dev->req.bi_io_vec = &dev->vec;
1565 dev->req.bi_vcnt++;
1566 dev->req.bi_max_vecs++;
1567 dev->vec.bv_page = dev->page;
1568 dev->vec.bv_len = STRIPE_SIZE;
1569 dev->vec.bv_offset = 0;
1570
1571 dev->req.bi_sector = sh->sector;
1572 dev->req.bi_private = sh;
1573
1574 dev->flags = 0;
NeilBrown784052e2009-03-31 15:19:07 +11001575 dev->sector = compute_blocknr(sh, i, previous);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576}
1577
1578static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1579{
1580 char b[BDEVNAME_SIZE];
1581 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
Dan Williams45b42332007-07-09 11:56:43 -07001582 pr_debug("raid5: error called\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583
NeilBrownb2d444d2005-11-08 21:39:31 -08001584 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown850b2b42006-10-03 01:15:46 -07001585 set_bit(MD_CHANGE_DEVS, &mddev->flags);
NeilBrownc04be0a2006-10-03 01:15:53 -07001586 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1587 unsigned long flags;
1588 spin_lock_irqsave(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 mddev->degraded++;
NeilBrownc04be0a2006-10-03 01:15:53 -07001590 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 /*
1592 * if recovery was running, make sure it aborts.
1593 */
NeilBrowndfc70642008-05-23 13:04:39 -07001594 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 }
NeilBrownb2d444d2005-11-08 21:39:31 -08001596 set_bit(Faulty, &rdev->flags);
NeilBrownd710e132008-10-13 11:55:12 +11001597 printk(KERN_ALERT
1598 "raid5: Disk failure on %s, disabling device.\n"
1599 "raid5: Operation continuing on %d devices.\n",
1600 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 }
NeilBrown16a53ec2006-06-26 00:27:38 -07001602}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603
1604/*
1605 * Input: a 'big' sector number,
1606 * Output: index of the data and parity disk, and the sector # in them.
1607 */
NeilBrown112bf892009-03-31 14:39:38 +11001608static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
NeilBrown911d4ee2009-03-31 14:39:38 +11001609 int previous, int *dd_idx,
1610 struct stripe_head *sh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611{
1612 long stripe;
1613 unsigned long chunk_number;
1614 unsigned int chunk_offset;
NeilBrown911d4ee2009-03-31 14:39:38 +11001615 int pd_idx, qd_idx;
NeilBrown67cc2b82009-03-31 14:39:38 +11001616 int ddf_layout = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 sector_t new_sector;
NeilBrowne183eae2009-03-31 15:20:22 +11001618 int algorithm = previous ? conf->prev_algo
1619 : conf->algorithm;
NeilBrown784052e2009-03-31 15:19:07 +11001620 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
1621 : (conf->chunk_size >> 9);
NeilBrown112bf892009-03-31 14:39:38 +11001622 int raid_disks = previous ? conf->previous_raid_disks
1623 : conf->raid_disks;
1624 int data_disks = raid_disks - conf->max_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
1626 /* First compute the information on this sector */
1627
1628 /*
1629 * Compute the chunk number and the sector offset inside the chunk
1630 */
1631 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1632 chunk_number = r_sector;
1633 BUG_ON(r_sector != chunk_number);
1634
1635 /*
1636 * Compute the stripe number
1637 */
1638 stripe = chunk_number / data_disks;
1639
1640 /*
1641 * Compute the data disk and parity disk indexes inside the stripe
1642 */
1643 *dd_idx = chunk_number % data_disks;
1644
1645 /*
1646 * Select the parity disk based on the user selected algorithm.
1647 */
NeilBrown911d4ee2009-03-31 14:39:38 +11001648 pd_idx = qd_idx = ~0;
NeilBrown16a53ec2006-06-26 00:27:38 -07001649 switch(conf->level) {
1650 case 4:
NeilBrown911d4ee2009-03-31 14:39:38 +11001651 pd_idx = data_disks;
NeilBrown16a53ec2006-06-26 00:27:38 -07001652 break;
1653 case 5:
NeilBrowne183eae2009-03-31 15:20:22 +11001654 switch (algorithm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 case ALGORITHM_LEFT_ASYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001656 pd_idx = data_disks - stripe % raid_disks;
1657 if (*dd_idx >= pd_idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 (*dd_idx)++;
1659 break;
1660 case ALGORITHM_RIGHT_ASYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001661 pd_idx = stripe % raid_disks;
1662 if (*dd_idx >= pd_idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 (*dd_idx)++;
1664 break;
1665 case ALGORITHM_LEFT_SYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001666 pd_idx = data_disks - stripe % raid_disks;
1667 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 break;
1669 case ALGORITHM_RIGHT_SYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001670 pd_idx = stripe % raid_disks;
1671 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 break;
NeilBrown99c0fb52009-03-31 14:39:38 +11001673 case ALGORITHM_PARITY_0:
1674 pd_idx = 0;
1675 (*dd_idx)++;
1676 break;
1677 case ALGORITHM_PARITY_N:
1678 pd_idx = data_disks;
1679 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 default:
NeilBrown14f8d262006-01-06 00:20:14 -08001681 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
NeilBrowne183eae2009-03-31 15:20:22 +11001682 algorithm);
NeilBrown99c0fb52009-03-31 14:39:38 +11001683 BUG();
NeilBrown16a53ec2006-06-26 00:27:38 -07001684 }
1685 break;
1686 case 6:
1687
NeilBrowne183eae2009-03-31 15:20:22 +11001688 switch (algorithm) {
NeilBrown16a53ec2006-06-26 00:27:38 -07001689 case ALGORITHM_LEFT_ASYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001690 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1691 qd_idx = pd_idx + 1;
1692 if (pd_idx == raid_disks-1) {
NeilBrown99c0fb52009-03-31 14:39:38 +11001693 (*dd_idx)++; /* Q D D D P */
NeilBrown911d4ee2009-03-31 14:39:38 +11001694 qd_idx = 0;
1695 } else if (*dd_idx >= pd_idx)
NeilBrown16a53ec2006-06-26 00:27:38 -07001696 (*dd_idx) += 2; /* D D P Q D */
1697 break;
1698 case ALGORITHM_RIGHT_ASYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001699 pd_idx = stripe % raid_disks;
1700 qd_idx = pd_idx + 1;
1701 if (pd_idx == raid_disks-1) {
NeilBrown99c0fb52009-03-31 14:39:38 +11001702 (*dd_idx)++; /* Q D D D P */
NeilBrown911d4ee2009-03-31 14:39:38 +11001703 qd_idx = 0;
1704 } else if (*dd_idx >= pd_idx)
NeilBrown16a53ec2006-06-26 00:27:38 -07001705 (*dd_idx) += 2; /* D D P Q D */
1706 break;
1707 case ALGORITHM_LEFT_SYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001708 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1709 qd_idx = (pd_idx + 1) % raid_disks;
1710 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
NeilBrown16a53ec2006-06-26 00:27:38 -07001711 break;
1712 case ALGORITHM_RIGHT_SYMMETRIC:
NeilBrown911d4ee2009-03-31 14:39:38 +11001713 pd_idx = stripe % raid_disks;
1714 qd_idx = (pd_idx + 1) % raid_disks;
1715 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
NeilBrown16a53ec2006-06-26 00:27:38 -07001716 break;
NeilBrown99c0fb52009-03-31 14:39:38 +11001717
1718 case ALGORITHM_PARITY_0:
1719 pd_idx = 0;
1720 qd_idx = 1;
1721 (*dd_idx) += 2;
1722 break;
1723 case ALGORITHM_PARITY_N:
1724 pd_idx = data_disks;
1725 qd_idx = data_disks + 1;
1726 break;
1727
1728 case ALGORITHM_ROTATING_ZERO_RESTART:
1729 /* Exactly the same as RIGHT_ASYMMETRIC, but order
1730 * of blocks for computing Q is different.
1731 */
1732 pd_idx = stripe % raid_disks;
1733 qd_idx = pd_idx + 1;
1734 if (pd_idx == raid_disks-1) {
1735 (*dd_idx)++; /* Q D D D P */
1736 qd_idx = 0;
1737 } else if (*dd_idx >= pd_idx)
1738 (*dd_idx) += 2; /* D D P Q D */
NeilBrown67cc2b82009-03-31 14:39:38 +11001739 ddf_layout = 1;
NeilBrown99c0fb52009-03-31 14:39:38 +11001740 break;
1741
1742 case ALGORITHM_ROTATING_N_RESTART:
1743 /* Same as left_asymmetric, but first stripe is
1744 * D D D P Q rather than
1745 * Q D D D P
1746 */
1747 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks);
1748 qd_idx = pd_idx + 1;
1749 if (pd_idx == raid_disks-1) {
1750 (*dd_idx)++; /* Q D D D P */
1751 qd_idx = 0;
1752 } else if (*dd_idx >= pd_idx)
1753 (*dd_idx) += 2; /* D D P Q D */
NeilBrown67cc2b82009-03-31 14:39:38 +11001754 ddf_layout = 1;
NeilBrown99c0fb52009-03-31 14:39:38 +11001755 break;
1756
1757 case ALGORITHM_ROTATING_N_CONTINUE:
1758 /* Same as left_symmetric but Q is before P */
1759 pd_idx = raid_disks - 1 - (stripe % raid_disks);
1760 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1761 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
NeilBrown67cc2b82009-03-31 14:39:38 +11001762 ddf_layout = 1;
NeilBrown99c0fb52009-03-31 14:39:38 +11001763 break;
1764
1765 case ALGORITHM_LEFT_ASYMMETRIC_6:
1766 /* RAID5 left_asymmetric, with Q on last device */
1767 pd_idx = data_disks - stripe % (raid_disks-1);
1768 if (*dd_idx >= pd_idx)
1769 (*dd_idx)++;
1770 qd_idx = raid_disks - 1;
1771 break;
1772
1773 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1774 pd_idx = stripe % (raid_disks-1);
1775 if (*dd_idx >= pd_idx)
1776 (*dd_idx)++;
1777 qd_idx = raid_disks - 1;
1778 break;
1779
1780 case ALGORITHM_LEFT_SYMMETRIC_6:
1781 pd_idx = data_disks - stripe % (raid_disks-1);
1782 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1783 qd_idx = raid_disks - 1;
1784 break;
1785
1786 case ALGORITHM_RIGHT_SYMMETRIC_6:
1787 pd_idx = stripe % (raid_disks-1);
1788 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1789 qd_idx = raid_disks - 1;
1790 break;
1791
1792 case ALGORITHM_PARITY_0_6:
1793 pd_idx = 0;
1794 (*dd_idx)++;
1795 qd_idx = raid_disks - 1;
1796 break;
1797
1798
NeilBrown16a53ec2006-06-26 00:27:38 -07001799 default:
NeilBrownd710e132008-10-13 11:55:12 +11001800 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
NeilBrowne183eae2009-03-31 15:20:22 +11001801 algorithm);
NeilBrown99c0fb52009-03-31 14:39:38 +11001802 BUG();
NeilBrown16a53ec2006-06-26 00:27:38 -07001803 }
1804 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 }
1806
NeilBrown911d4ee2009-03-31 14:39:38 +11001807 if (sh) {
1808 sh->pd_idx = pd_idx;
1809 sh->qd_idx = qd_idx;
NeilBrown67cc2b82009-03-31 14:39:38 +11001810 sh->ddf_layout = ddf_layout;
NeilBrown911d4ee2009-03-31 14:39:38 +11001811 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 /*
1813 * Finally, compute the new sector number
1814 */
1815 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1816 return new_sector;
1817}
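/*
 * A worked example, assuming a 4-drive RAID-5 with the LEFT_ASYMMETRIC
 * layout and 64KiB chunks (sectors_per_chunk = 128, data_disks = 3),
 * for r_sector = 1000:
 *
 *	chunk_offset = 1000 % 128 = 104
 *	chunk_number = 1000 / 128 = 7
 *	stripe       = 7 / 3 = 2,   dd_idx = 7 % 3 = 1
 *	pd_idx       = 3 - (2 % 4) = 1; dd_idx >= pd_idx, so dd_idx = 2
 *	new_sector   = 2 * 128 + 104 = 360
 *
 * i.e. array sector 1000 lives on device 2 at sector 360, with the
 * parity for that stripe on device 1.
 */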
1818
1819
NeilBrown784052e2009-03-31 15:19:07 +11001820static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821{
1822 raid5_conf_t *conf = sh->raid_conf;
NeilBrownb875e532006-12-10 02:20:49 -08001823 int raid_disks = sh->disks;
1824 int data_disks = raid_disks - conf->max_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 sector_t new_sector = sh->sector, check;
NeilBrown784052e2009-03-31 15:19:07 +11001826 int sectors_per_chunk = previous ? (conf->prev_chunk >> 9)
1827 : (conf->chunk_size >> 9);
NeilBrowne183eae2009-03-31 15:20:22 +11001828 int algorithm = previous ? conf->prev_algo
1829 : conf->algorithm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 sector_t stripe;
1831 int chunk_offset;
NeilBrown911d4ee2009-03-31 14:39:38 +11001832 int chunk_number, dummy1, dd_idx = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 sector_t r_sector;
NeilBrown911d4ee2009-03-31 14:39:38 +11001834 struct stripe_head sh2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
NeilBrown16a53ec2006-06-26 00:27:38 -07001836
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1838 stripe = new_sector;
1839 BUG_ON(new_sector != stripe);
1840
NeilBrown16a53ec2006-06-26 00:27:38 -07001841 if (i == sh->pd_idx)
1842 return 0;
1843 switch(conf->level) {
1844 case 4: break;
1845 case 5:
NeilBrowne183eae2009-03-31 15:20:22 +11001846 switch (algorithm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 case ALGORITHM_LEFT_ASYMMETRIC:
1848 case ALGORITHM_RIGHT_ASYMMETRIC:
1849 if (i > sh->pd_idx)
1850 i--;
1851 break;
1852 case ALGORITHM_LEFT_SYMMETRIC:
1853 case ALGORITHM_RIGHT_SYMMETRIC:
1854 if (i < sh->pd_idx)
1855 i += raid_disks;
1856 i -= (sh->pd_idx + 1);
1857 break;
NeilBrown99c0fb52009-03-31 14:39:38 +11001858 case ALGORITHM_PARITY_0:
1859 i -= 1;
1860 break;
1861 case ALGORITHM_PARITY_N:
1862 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 default:
NeilBrown14f8d262006-01-06 00:20:14 -08001864 printk(KERN_ERR "raid5: unsupported algorithm %d\n",
NeilBrowne183eae2009-03-31 15:20:22 +11001865 algorithm);
NeilBrown99c0fb52009-03-31 14:39:38 +11001866 BUG();
NeilBrown16a53ec2006-06-26 00:27:38 -07001867 }
1868 break;
1869 case 6:
NeilBrownd0dabf72009-03-31 14:39:38 +11001870 if (i == sh->qd_idx)
NeilBrown16a53ec2006-06-26 00:27:38 -07001871 return 0; /* It is the Q disk */
NeilBrowne183eae2009-03-31 15:20:22 +11001872 switch (algorithm) {
NeilBrown16a53ec2006-06-26 00:27:38 -07001873 case ALGORITHM_LEFT_ASYMMETRIC:
1874 case ALGORITHM_RIGHT_ASYMMETRIC:
NeilBrown99c0fb52009-03-31 14:39:38 +11001875 case ALGORITHM_ROTATING_ZERO_RESTART:
1876 case ALGORITHM_ROTATING_N_RESTART:
1877 if (sh->pd_idx == raid_disks-1)
1878 i--; /* Q D D D P */
NeilBrown16a53ec2006-06-26 00:27:38 -07001879 else if (i > sh->pd_idx)
1880 i -= 2; /* D D P Q D */
1881 break;
1882 case ALGORITHM_LEFT_SYMMETRIC:
1883 case ALGORITHM_RIGHT_SYMMETRIC:
1884 if (sh->pd_idx == raid_disks-1)
1885 i--; /* Q D D D P */
1886 else {
1887 /* D D P Q D */
1888 if (i < sh->pd_idx)
1889 i += raid_disks;
1890 i -= (sh->pd_idx + 2);
1891 }
1892 break;
NeilBrown99c0fb52009-03-31 14:39:38 +11001893 case ALGORITHM_PARITY_0:
1894 i -= 2;
1895 break;
1896 case ALGORITHM_PARITY_N:
1897 break;
1898 case ALGORITHM_ROTATING_N_CONTINUE:
1899 if (sh->pd_idx == 0)
1900 i--; /* P D D D Q */
1901 else if (i > sh->pd_idx)
1902 i -= 2; /* D D Q P D */
1903 break;
1904 case ALGORITHM_LEFT_ASYMMETRIC_6:
1905 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1906 if (i > sh->pd_idx)
1907 i--;
1908 break;
1909 case ALGORITHM_LEFT_SYMMETRIC_6:
1910 case ALGORITHM_RIGHT_SYMMETRIC_6:
1911 if (i < sh->pd_idx)
1912 i += data_disks + 1;
1913 i -= (sh->pd_idx + 1);
1914 break;
1915 case ALGORITHM_PARITY_0_6:
1916 i -= 1;
1917 break;
NeilBrown16a53ec2006-06-26 00:27:38 -07001918 default:
NeilBrownd710e132008-10-13 11:55:12 +11001919 printk(KERN_CRIT "raid6: unsupported algorithm %d\n",
NeilBrowne183eae2009-03-31 15:20:22 +11001920 algorithm);
NeilBrown99c0fb52009-03-31 14:39:38 +11001921 BUG();
NeilBrown16a53ec2006-06-26 00:27:38 -07001922 }
1923 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 }
1925
1926 chunk_number = stripe * data_disks + i;
1927 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset;
1928
NeilBrown112bf892009-03-31 14:39:38 +11001929 check = raid5_compute_sector(conf, r_sector,
NeilBrown784052e2009-03-31 15:19:07 +11001930 previous, &dummy1, &sh2);
NeilBrown911d4ee2009-03-31 14:39:38 +11001931 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
1932 || sh2.qd_idx != sh->qd_idx) {
NeilBrown14f8d262006-01-06 00:20:14 -08001933 printk(KERN_ERR "compute_blocknr: map not correct\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 return 0;
1935 }
1936 return r_sector;
1937}
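/*
 * Running the same illustrative example in reverse: for device 2,
 * sector 360 with pd_idx == 1 under LEFT_ASYMMETRIC, i > sh->pd_idx so
 * i becomes 1, then chunk_number = 2 * 3 + 1 = 7 and
 * r_sector = 7 * 128 + 104 = 1000, matching the forward mapping, so the
 * raid5_compute_sector() self-check above passes.
 */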
1938
1939
Dan Williams600aa102008-06-28 08:32:05 +10001940static void
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07001941schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
Dan Williams600aa102008-06-28 08:32:05 +10001942 int rcw, int expand)
Dan Williamse33129d2007-01-02 13:52:30 -07001943{
1944 int i, pd_idx = sh->pd_idx, disks = sh->disks;
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07001945 raid5_conf_t *conf = sh->raid_conf;
1946 int level = conf->level;
NeilBrown16a53ec2006-06-26 00:27:38 -07001947
Dan Williamse33129d2007-01-02 13:52:30 -07001948 if (rcw) {
1949 /* if we are not expanding, this is a proper write request, and
1950 * there will be bios with new data to be drained into the
1951 * stripe cache
1952 */
1953 if (!expand) {
Dan Williams600aa102008-06-28 08:32:05 +10001954 sh->reconstruct_state = reconstruct_state_drain_run;
1955 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
1956 } else
1957 sh->reconstruct_state = reconstruct_state_run;
Dan Williamse33129d2007-01-02 13:52:30 -07001958
Dan Williamsac6b53b2009-07-14 13:40:19 -07001959 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
Dan Williamse33129d2007-01-02 13:52:30 -07001960
1961 for (i = disks; i--; ) {
1962 struct r5dev *dev = &sh->dev[i];
1963
1964 if (dev->towrite) {
1965 set_bit(R5_LOCKED, &dev->flags);
Dan Williamsd8ee0722008-06-28 08:32:06 +10001966 set_bit(R5_Wantdrain, &dev->flags);
Dan Williamse33129d2007-01-02 13:52:30 -07001967 if (!expand)
1968 clear_bit(R5_UPTODATE, &dev->flags);
Dan Williams600aa102008-06-28 08:32:05 +10001969 s->locked++;
Dan Williamse33129d2007-01-02 13:52:30 -07001970 }
1971 }
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07001972 if (s->locked + conf->max_degraded == disks)
Dan Williams8b3e6cd2008-04-28 02:15:53 -07001973 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07001974 atomic_inc(&conf->pending_full_writes);
Dan Williamse33129d2007-01-02 13:52:30 -07001975 } else {
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07001976 BUG_ON(level == 6);
Dan Williamse33129d2007-01-02 13:52:30 -07001977 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
1978 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
1979
Dan Williamsd8ee0722008-06-28 08:32:06 +10001980 sh->reconstruct_state = reconstruct_state_prexor_drain_run;
Dan Williams600aa102008-06-28 08:32:05 +10001981 set_bit(STRIPE_OP_PREXOR, &s->ops_request);
1982 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
Dan Williamsac6b53b2009-07-14 13:40:19 -07001983 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
Dan Williamse33129d2007-01-02 13:52:30 -07001984
1985 for (i = disks; i--; ) {
1986 struct r5dev *dev = &sh->dev[i];
1987 if (i == pd_idx)
1988 continue;
1989
Dan Williamse33129d2007-01-02 13:52:30 -07001990 if (dev->towrite &&
1991 (test_bit(R5_UPTODATE, &dev->flags) ||
Dan Williamsd8ee0722008-06-28 08:32:06 +10001992 test_bit(R5_Wantcompute, &dev->flags))) {
1993 set_bit(R5_Wantdrain, &dev->flags);
Dan Williamse33129d2007-01-02 13:52:30 -07001994 set_bit(R5_LOCKED, &dev->flags);
1995 clear_bit(R5_UPTODATE, &dev->flags);
Dan Williams600aa102008-06-28 08:32:05 +10001996 s->locked++;
Dan Williamse33129d2007-01-02 13:52:30 -07001997 }
1998 }
1999 }
2000
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07002001 /* keep the parity disk(s) locked while asynchronous operations
Dan Williamse33129d2007-01-02 13:52:30 -07002002 * are in flight
2003 */
2004 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2005 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
Dan Williams600aa102008-06-28 08:32:05 +10002006 s->locked++;
Dan Williamse33129d2007-01-02 13:52:30 -07002007
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07002008 if (level == 6) {
2009 int qd_idx = sh->qd_idx;
2010 struct r5dev *dev = &sh->dev[qd_idx];
2011
2012 set_bit(R5_LOCKED, &dev->flags);
2013 clear_bit(R5_UPTODATE, &dev->flags);
2014 s->locked++;
2015 }
2016
Dan Williams600aa102008-06-28 08:32:05 +10002017 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
Harvey Harrisone46b272b2008-04-28 02:15:50 -07002018 __func__, (unsigned long long)sh->sector,
Dan Williams600aa102008-06-28 08:32:05 +10002019 s->locked, s->ops_request);
Dan Williamse33129d2007-01-02 13:52:30 -07002020}
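/*
 * To summarise the two paths: with rcw set this schedules a
 * reconstruct-write, draining new data into the stripe and recomputing
 * parity from every block; with rcw clear it schedules the
 * read-modify-write path, where STRIPE_OP_PREXOR first subtracts the
 * old data from the parity before the new data is drained and xor-ed
 * back in.  Only RAID-5 ever takes the rcw == 0 branch; the
 * BUG_ON(level == 6) enforces that RAID-6 always reconstructs.
 */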
NeilBrown16a53ec2006-06-26 00:27:38 -07002021
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022/*
2023 * Each stripe/dev can have one or more bion attached.
NeilBrown16a53ec2006-06-26 00:27:38 -07002024 * toread/towrite point to the first in a chain.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 * The bi_next chain must be in order.
2026 */
2027static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2028{
2029 struct bio **bip;
2030 raid5_conf_t *conf = sh->raid_conf;
NeilBrown72626682005-09-09 16:23:54 -07002031 int firstwrite=0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
Dan Williams45b42332007-07-09 11:56:43 -07002033 pr_debug("adding bh b#%llu to stripe s#%llu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 (unsigned long long)bi->bi_sector,
2035 (unsigned long long)sh->sector);
2036
2037
2038 spin_lock(&sh->lock);
2039 spin_lock_irq(&conf->device_lock);
NeilBrown72626682005-09-09 16:23:54 -07002040 if (forwrite) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 bip = &sh->dev[dd_idx].towrite;
NeilBrown72626682005-09-09 16:23:54 -07002042 if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2043 firstwrite = 1;
2044 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 bip = &sh->dev[dd_idx].toread;
2046 while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2047 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2048 goto overlap;
2049 bip = & (*bip)->bi_next;
2050 }
2051 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2052 goto overlap;
2053
Eric Sesterhenn78bafeb2006-04-02 13:31:42 +02002054 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 if (*bip)
2056 bi->bi_next = *bip;
2057 *bip = bi;
Jens Axboe960e7392008-08-15 10:41:18 +02002058 bi->bi_phys_segments++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 spin_unlock_irq(&conf->device_lock);
2060 spin_unlock(&sh->lock);
2061
Dan Williams45b42332007-07-09 11:56:43 -07002062 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 (unsigned long long)bi->bi_sector,
2064 (unsigned long long)sh->sector, dd_idx);
2065
NeilBrown72626682005-09-09 16:23:54 -07002066 if (conf->mddev->bitmap && firstwrite) {
NeilBrown72626682005-09-09 16:23:54 -07002067 bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2068 STRIPE_SECTORS, 0);
NeilBrownae3c20c2006-07-10 04:44:17 -07002069 sh->bm_seq = conf->seq_flush+1;
NeilBrown72626682005-09-09 16:23:54 -07002070 set_bit(STRIPE_BIT_DELAY, &sh->state);
2071 }
2072
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 if (forwrite) {
2074 /* check if page is covered */
2075 sector_t sector = sh->dev[dd_idx].sector;
2076 for (bi=sh->dev[dd_idx].towrite;
2077 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2078 bi && bi->bi_sector <= sector;
2079 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2080 if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2081 sector = bi->bi_sector + (bi->bi_size>>9);
2082 }
2083 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2084 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2085 }
2086 return 1;
2087
2088 overlap:
2089 set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2090 spin_unlock_irq(&conf->device_lock);
2091 spin_unlock(&sh->lock);
2092 return 0;
2093}
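/*
 * An illustrative case of the overlap rule, assuming STRIPE_SECTORS = 8
 * and sectors counted from the start of this dev's page: with a write
 * bio already queued for sectors [0,4), a new bio for [2,6) trips the
 * (*bip)->bi_sector + size > bi->bi_sector test and takes the overlap
 * path, so the caller must wait for R5_Overlap to clear and retry; a
 * bio for [4,8) chains cleanly after it, and once the chain covers the
 * whole page R5_OVERWRITE is set.
 */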
2094
NeilBrown29269552006-03-27 01:18:10 -08002095static void end_reshape(raid5_conf_t *conf);
2096
NeilBrown911d4ee2009-03-31 14:39:38 +11002097static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2098 struct stripe_head *sh)
NeilBrownccfcc3c2006-03-27 01:18:09 -08002099{
NeilBrown784052e2009-03-31 15:19:07 +11002100 int sectors_per_chunk =
2101 previous ? (conf->prev_chunk >> 9)
2102 : (conf->chunk_size >> 9);
NeilBrown911d4ee2009-03-31 14:39:38 +11002103 int dd_idx;
Coywolf Qi Hunt2d2063c2006-10-03 01:15:50 -07002104 int chunk_offset = sector_div(stripe, sectors_per_chunk);
NeilBrown112bf892009-03-31 14:39:38 +11002105 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
Coywolf Qi Hunt2d2063c2006-10-03 01:15:50 -07002106
NeilBrown112bf892009-03-31 14:39:38 +11002107 raid5_compute_sector(conf,
2108 stripe * (disks - conf->max_degraded)
NeilBrownb875e532006-12-10 02:20:49 -08002109 *sectors_per_chunk + chunk_offset,
NeilBrown112bf892009-03-31 14:39:38 +11002110 previous,
NeilBrown911d4ee2009-03-31 14:39:38 +11002111 &dd_idx, sh);
NeilBrownccfcc3c2006-03-27 01:18:09 -08002112}
2113
Dan Williamsa4456852007-07-09 11:56:43 -07002114static void
Dan Williams1fe797e2008-06-28 09:16:30 +10002115handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
Dan Williamsa4456852007-07-09 11:56:43 -07002116 struct stripe_head_state *s, int disks,
2117 struct bio **return_bi)
2118{
2119 int i;
2120 for (i = disks; i--; ) {
2121 struct bio *bi;
2122 int bitmap_end = 0;
2123
2124 if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2125 mdk_rdev_t *rdev;
2126 rcu_read_lock();
2127 rdev = rcu_dereference(conf->disks[i].rdev);
2128 if (rdev && test_bit(In_sync, &rdev->flags))
2129 /* multiple read failures in one stripe */
2130 md_error(conf->mddev, rdev);
2131 rcu_read_unlock();
2132 }
2133 spin_lock_irq(&conf->device_lock);
2134 /* fail all writes first */
2135 bi = sh->dev[i].towrite;
2136 sh->dev[i].towrite = NULL;
2137 if (bi) {
2138 s->to_write--;
2139 bitmap_end = 1;
2140 }
2141
2142 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2143 wake_up(&conf->wait_for_overlap);
2144
2145 while (bi && bi->bi_sector <
2146 sh->dev[i].sector + STRIPE_SECTORS) {
2147 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2148 clear_bit(BIO_UPTODATE, &bi->bi_flags);
Jens Axboe960e7392008-08-15 10:41:18 +02002149 if (!raid5_dec_bi_phys_segments(bi)) {
Dan Williamsa4456852007-07-09 11:56:43 -07002150 md_write_end(conf->mddev);
2151 bi->bi_next = *return_bi;
2152 *return_bi = bi;
2153 }
2154 bi = nextbi;
2155 }
2156 /* and fail all 'written' */
2157 bi = sh->dev[i].written;
2158 sh->dev[i].written = NULL;
2159 if (bi) bitmap_end = 1;
2160 while (bi && bi->bi_sector <
2161 sh->dev[i].sector + STRIPE_SECTORS) {
2162 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2163 clear_bit(BIO_UPTODATE, &bi->bi_flags);
Jens Axboe960e7392008-08-15 10:41:18 +02002164 if (!raid5_dec_bi_phys_segments(bi)) {
Dan Williamsa4456852007-07-09 11:56:43 -07002165 md_write_end(conf->mddev);
2166 bi->bi_next = *return_bi;
2167 *return_bi = bi;
2168 }
2169 bi = bi2;
2170 }
2171
Dan Williamsb5e98d62007-01-02 13:52:31 -07002172 /* fail any reads if this device is non-operational and
2173 * the data has not reached the cache yet.
2174 */
2175 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2176 (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2177 test_bit(R5_ReadError, &sh->dev[i].flags))) {
Dan Williamsa4456852007-07-09 11:56:43 -07002178 bi = sh->dev[i].toread;
2179 sh->dev[i].toread = NULL;
2180 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2181 wake_up(&conf->wait_for_overlap);
2182 if (bi) s->to_read--;
2183 while (bi && bi->bi_sector <
2184 sh->dev[i].sector + STRIPE_SECTORS) {
2185 struct bio *nextbi =
2186 r5_next_bio(bi, sh->dev[i].sector);
2187 clear_bit(BIO_UPTODATE, &bi->bi_flags);
Jens Axboe960e7392008-08-15 10:41:18 +02002188 if (!raid5_dec_bi_phys_segments(bi)) {
Dan Williamsa4456852007-07-09 11:56:43 -07002189 bi->bi_next = *return_bi;
2190 *return_bi = bi;
2191 }
2192 bi = nextbi;
2193 }
2194 }
2195 spin_unlock_irq(&conf->device_lock);
2196 if (bitmap_end)
2197 bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2198 STRIPE_SECTORS, 0, 0);
2199 }
2200
Dan Williams8b3e6cd2008-04-28 02:15:53 -07002201 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2202 if (atomic_dec_and_test(&conf->pending_full_writes))
2203 md_wakeup_thread(conf->mddev->thread);
Dan Williamsa4456852007-07-09 11:56:43 -07002204}
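/*
 * Note that all three sweeps above (towrite, written and toread)
 * complete the bios with BIO_UPTODATE cleared, so when bi_phys_segments
 * drops to zero and the bio is chained onto *return_bi, the eventual
 * bio_endio() reports the failure to the upper layers.
 */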
2205
Dan Williams1fe797e2008-06-28 09:16:30 +10002206/* fetch_block5 - checks the given member device to see if its data needs
2207 * to be read or computed to satisfy a request.
2208 *
2209 * Returns 1 when no more member devices need to be checked, otherwise returns
2210 * 0 to tell the loop in handle_stripe_fill5 to continue
Dan Williamsf38e1212007-01-02 13:52:30 -07002211 */
Dan Williams1fe797e2008-06-28 09:16:30 +10002212static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s,
2213 int disk_idx, int disks)
Dan Williamsf38e1212007-01-02 13:52:30 -07002214{
2215 struct r5dev *dev = &sh->dev[disk_idx];
2216 struct r5dev *failed_dev = &sh->dev[s->failed_num];
2217
Dan Williamsf38e1212007-01-02 13:52:30 -07002218 /* is the data in this block needed, and can we get it? */
2219 if (!test_bit(R5_LOCKED, &dev->flags) &&
Dan Williams1fe797e2008-06-28 09:16:30 +10002220 !test_bit(R5_UPTODATE, &dev->flags) &&
2221 (dev->toread ||
2222 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2223 s->syncing || s->expanding ||
2224 (s->failed &&
2225 (failed_dev->toread ||
2226 (failed_dev->towrite &&
2227 !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) {
Dan Williams976ea8d2008-06-28 08:32:03 +10002228 /* We would like to get this block, possibly by computing it,
2229 * otherwise read it if the backing disk is insync
Dan Williamsf38e1212007-01-02 13:52:30 -07002230 */
2231 if ((s->uptodate == disks - 1) &&
Dan Williamsecc65c92008-06-28 08:31:57 +10002232 (s->failed && disk_idx == s->failed_num)) {
Dan Williams976ea8d2008-06-28 08:32:03 +10002233 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2234 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
Dan Williamsf38e1212007-01-02 13:52:30 -07002235 set_bit(R5_Wantcompute, &dev->flags);
2236 sh->ops.target = disk_idx;
Dan Williamsac6b53b2009-07-14 13:40:19 -07002237 sh->ops.target2 = -1;
Dan Williamsf38e1212007-01-02 13:52:30 -07002238 s->req_compute = 1;
Dan Williamsf38e1212007-01-02 13:52:30 -07002239 /* Careful: from this point on 'uptodate' is in the eye
Dan Williamsac6b53b2009-07-14 13:40:19 -07002240 * of raid_run_ops which services 'compute' operations
Dan Williamsf38e1212007-01-02 13:52:30 -07002241 * before writes. R5_Wantcompute flags a block that will
2242 * be R5_UPTODATE by the time it is needed for a
2243 * subsequent operation.
2244 */
2245 s->uptodate++;
Dan Williams1fe797e2008-06-28 09:16:30 +10002246 return 1; /* uptodate + compute == disks */
Dan Williams7a1fc532008-07-10 04:54:57 -07002247 } else if (test_bit(R5_Insync, &dev->flags)) {
Dan Williamsf38e1212007-01-02 13:52:30 -07002248 set_bit(R5_LOCKED, &dev->flags);
2249 set_bit(R5_Wantread, &dev->flags);
Dan Williamsf38e1212007-01-02 13:52:30 -07002250 s->locked++;
2251 pr_debug("Reading block %d (sync=%d)\n", disk_idx,
2252 s->syncing);
2253 }
2254 }
2255
Dan Williams1fe797e2008-06-28 09:16:30 +10002256 return 0;
Dan Williamsf38e1212007-01-02 13:52:30 -07002257}
2258
Dan Williams1fe797e2008-06-28 09:16:30 +10002259/**
2260 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2261 */
2262static void handle_stripe_fill5(struct stripe_head *sh,
Dan Williamsa4456852007-07-09 11:56:43 -07002263 struct stripe_head_state *s, int disks)
2264{
2265 int i;
Dan Williamsf38e1212007-01-02 13:52:30 -07002266
Dan Williamsf38e1212007-01-02 13:52:30 -07002267 /* look for blocks to read/compute, skip this if a compute
2268 * is already in flight, or if the stripe contents are in the
2269 * midst of changing due to a write
2270 */
Dan Williams976ea8d2008-06-28 08:32:03 +10002271 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
Dan Williams1fe797e2008-06-28 09:16:30 +10002272 !sh->reconstruct_state)
Dan Williamsf38e1212007-01-02 13:52:30 -07002273 for (i = disks; i--; )
Dan Williams1fe797e2008-06-28 09:16:30 +10002274 if (fetch_block5(sh, s, i, disks))
Dan Williamsf38e1212007-01-02 13:52:30 -07002275 break;
Dan Williamsa4456852007-07-09 11:56:43 -07002276 set_bit(STRIPE_HANDLE, &sh->state);
2277}
2278
Yuri Tikhonov5599bec2009-08-29 19:13:12 -07002279/* fetch_block6 - checks the given member device to see if its data needs
2280 * to be read or computed to satisfy a request.
2281 *
2282 * Returns 1 when no more member devices need to be checked, otherwise returns
2283 * 0 to tell the loop in handle_stripe_fill6 to continue
2284 */
2285static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s,
2286 struct r6_state *r6s, int disk_idx, int disks)
2287{
2288 struct r5dev *dev = &sh->dev[disk_idx];
2289 struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]],
2290 &sh->dev[r6s->failed_num[1]] };
2291
2292 if (!test_bit(R5_LOCKED, &dev->flags) &&
2293 !test_bit(R5_UPTODATE, &dev->flags) &&
2294 (dev->toread ||
2295 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2296 s->syncing || s->expanding ||
2297 (s->failed >= 1 &&
2298 (fdev[0]->toread || s->to_write)) ||
2299 (s->failed >= 2 &&
2300 (fdev[1]->toread || s->to_write)))) {
2301 /* we would like to get this block, possibly by computing it,
2302 * otherwise read it if the backing disk is insync
2303 */
2304 BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2305 BUG_ON(test_bit(R5_Wantread, &dev->flags));
2306 if ((s->uptodate == disks - 1) &&
2307 (s->failed && (disk_idx == r6s->failed_num[0] ||
2308 disk_idx == r6s->failed_num[1]))) {
2309 /* a disk has failed and we've been asked to fetch its data;
2310 * compute it instead
2311 */
2312 pr_debug("Computing stripe %llu block %d\n",
2313 (unsigned long long)sh->sector, disk_idx);
2314 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2315 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2316 set_bit(R5_Wantcompute, &dev->flags);
2317 sh->ops.target = disk_idx;
2318 sh->ops.target2 = -1; /* no 2nd target */
2319 s->req_compute = 1;
2320 s->uptodate++;
2321 return 1;
2322 } else if (s->uptodate == disks-2 && s->failed >= 2) {
2323 /* Computing 2-failure is *very* expensive; only
2324 * do it if failed >= 2
2325 */
2326 int other;
2327 for (other = disks; other--; ) {
2328 if (other == disk_idx)
2329 continue;
2330 if (!test_bit(R5_UPTODATE,
2331 &sh->dev[other].flags))
2332 break;
2333 }
2334 BUG_ON(other < 0);
2335 pr_debug("Computing stripe %llu blocks %d,%d\n",
2336 (unsigned long long)sh->sector,
2337 disk_idx, other);
2338 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2339 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2340 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2341 set_bit(R5_Wantcompute, &sh->dev[other].flags);
2342 sh->ops.target = disk_idx;
2343 sh->ops.target2 = other;
2344 s->uptodate += 2;
2345 s->req_compute = 1;
2346 return 1;
2347 } else if (test_bit(R5_Insync, &dev->flags)) {
2348 set_bit(R5_LOCKED, &dev->flags);
2349 set_bit(R5_Wantread, &dev->flags);
2350 s->locked++;
2351 pr_debug("Reading block %d (sync=%d)\n",
2352 disk_idx, s->syncing);
2353 }
2354 }
2355
2356 return 0;
2357}
2358
2359/**
2360 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
2361 */
Dan Williams1fe797e2008-06-28 09:16:30 +10002362static void handle_stripe_fill6(struct stripe_head *sh,
Dan Williamsa4456852007-07-09 11:56:43 -07002363 struct stripe_head_state *s, struct r6_state *r6s,
2364 int disks)
2365{
2366 int i;
Yuri Tikhonov5599bec2009-08-29 19:13:12 -07002367
2368 /* look for blocks to read/compute, skip this if a compute
2369 * is already in flight, or if the stripe contents are in the
2370 * midst of changing due to a write
2371 */
2372 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2373 !sh->reconstruct_state)
2374 for (i = disks; i--; )
2375 if (fetch_block6(sh, s, r6s, i, disks))
2376 break;
Dan Williamsa4456852007-07-09 11:56:43 -07002377 set_bit(STRIPE_HANDLE, &sh->state);
2378}
2379
2380
Dan Williams1fe797e2008-06-28 09:16:30 +10002381/* handle_stripe_clean_event
Dan Williamsa4456852007-07-09 11:56:43 -07002382 * any written block on an uptodate or failed drive can be returned.
2383 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2384 * never LOCKED, so we don't need to test 'failed' directly.
2385 */
Dan Williams1fe797e2008-06-28 09:16:30 +10002386static void handle_stripe_clean_event(raid5_conf_t *conf,
Dan Williamsa4456852007-07-09 11:56:43 -07002387 struct stripe_head *sh, int disks, struct bio **return_bi)
2388{
2389 int i;
2390 struct r5dev *dev;
2391
2392 for (i = disks; i--; )
2393 if (sh->dev[i].written) {
2394 dev = &sh->dev[i];
2395 if (!test_bit(R5_LOCKED, &dev->flags) &&
2396 test_bit(R5_UPTODATE, &dev->flags)) {
2397 /* We can return any write requests */
2398 struct bio *wbi, *wbi2;
2399 int bitmap_end = 0;
Dan Williams45b42332007-07-09 11:56:43 -07002400 pr_debug("Return write for disc %d\n", i);
Dan Williamsa4456852007-07-09 11:56:43 -07002401 spin_lock_irq(&conf->device_lock);
2402 wbi = dev->written;
2403 dev->written = NULL;
2404 while (wbi && wbi->bi_sector <
2405 dev->sector + STRIPE_SECTORS) {
2406 wbi2 = r5_next_bio(wbi, dev->sector);
Jens Axboe960e7392008-08-15 10:41:18 +02002407 if (!raid5_dec_bi_phys_segments(wbi)) {
Dan Williamsa4456852007-07-09 11:56:43 -07002408 md_write_end(conf->mddev);
2409 wbi->bi_next = *return_bi;
2410 *return_bi = wbi;
2411 }
2412 wbi = wbi2;
2413 }
2414 if (dev->towrite == NULL)
2415 bitmap_end = 1;
2416 spin_unlock_irq(&conf->device_lock);
2417 if (bitmap_end)
2418 bitmap_endwrite(conf->mddev->bitmap,
2419 sh->sector,
2420 STRIPE_SECTORS,
2421 !test_bit(STRIPE_DEGRADED, &sh->state),
2422 0);
2423 }
2424 }
Dan Williams8b3e6cd2008-04-28 02:15:53 -07002425
2426 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2427 if (atomic_dec_and_test(&conf->pending_full_writes))
2428 md_wakeup_thread(conf->mddev->thread);
Dan Williamsa4456852007-07-09 11:56:43 -07002429}
2430
Dan Williams1fe797e2008-06-28 09:16:30 +10002431static void handle_stripe_dirtying5(raid5_conf_t *conf,
Dan Williamsa4456852007-07-09 11:56:43 -07002432 struct stripe_head *sh, struct stripe_head_state *s, int disks)
2433{
2434 int rmw = 0, rcw = 0, i;
2435 for (i = disks; i--; ) {
2436 /* would I have to read this buffer for read_modify_write */
2437 struct r5dev *dev = &sh->dev[i];
2438 if ((dev->towrite || i == sh->pd_idx) &&
2439 !test_bit(R5_LOCKED, &dev->flags) &&
Dan Williamsf38e1212007-01-02 13:52:30 -07002440 !(test_bit(R5_UPTODATE, &dev->flags) ||
2441 test_bit(R5_Wantcompute, &dev->flags))) {
Dan Williamsa4456852007-07-09 11:56:43 -07002442 if (test_bit(R5_Insync, &dev->flags))
2443 rmw++;
2444 else
2445 rmw += 2*disks; /* cannot read it */
2446 }
2447 /* Would I have to read this buffer for reconstruct_write */
2448 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2449 !test_bit(R5_LOCKED, &dev->flags) &&
Dan Williamsf38e1212007-01-02 13:52:30 -07002450 !(test_bit(R5_UPTODATE, &dev->flags) ||
2451 test_bit(R5_Wantcompute, &dev->flags))) {
2452 if (test_bit(R5_Insync, &dev->flags)) rcw++;
Dan Williamsa4456852007-07-09 11:56:43 -07002453 else
2454 rcw += 2*disks;
2455 }
2456 }
Dan Williams45b42332007-07-09 11:56:43 -07002457 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
Dan Williamsa4456852007-07-09 11:56:43 -07002458 (unsigned long long)sh->sector, rmw, rcw);
2459 set_bit(STRIPE_HANDLE, &sh->state);
2460 if (rmw < rcw && rmw > 0)
2461 /* prefer read-modify-write, but need to get some data */
2462 for (i = disks; i--; ) {
2463 struct r5dev *dev = &sh->dev[i];
2464 if ((dev->towrite || i == sh->pd_idx) &&
2465 !test_bit(R5_LOCKED, &dev->flags) &&
Dan Williamsf38e1212007-01-02 13:52:30 -07002466 !(test_bit(R5_UPTODATE, &dev->flags) ||
2467 test_bit(R5_Wantcompute, &dev->flags)) &&
Dan Williamsa4456852007-07-09 11:56:43 -07002468 test_bit(R5_Insync, &dev->flags)) {
2469 if (
2470 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
Dan Williams45b42332007-07-09 11:56:43 -07002471 pr_debug("Read_old block "
Dan Williamsa4456852007-07-09 11:56:43 -07002472 "%d for r-m-w\n", i);
2473 set_bit(R5_LOCKED, &dev->flags);
2474 set_bit(R5_Wantread, &dev->flags);
2475 s->locked++;
2476 } else {
2477 set_bit(STRIPE_DELAYED, &sh->state);
2478 set_bit(STRIPE_HANDLE, &sh->state);
2479 }
2480 }
2481 }
2482 if (rcw <= rmw && rcw > 0)
2483 /* want reconstruct write, but need to get some data */
2484 for (i = disks; i--; ) {
2485 struct r5dev *dev = &sh->dev[i];
2486 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2487 i != sh->pd_idx &&
2488 !test_bit(R5_LOCKED, &dev->flags) &&
Dan Williamsf38e1212007-01-02 13:52:30 -07002489 !(test_bit(R5_UPTODATE, &dev->flags) ||
2490 test_bit(R5_Wantcompute, &dev->flags)) &&
Dan Williamsa4456852007-07-09 11:56:43 -07002491 test_bit(R5_Insync, &dev->flags)) {
2492 if (
2493 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
Dan Williams45b42332007-07-09 11:56:43 -07002494 pr_debug("Read_old block "
Dan Williamsa4456852007-07-09 11:56:43 -07002495 "%d for Reconstruct\n", i);
2496 set_bit(R5_LOCKED, &dev->flags);
2497 set_bit(R5_Wantread, &dev->flags);
2498 s->locked++;
2499 } else {
2500 set_bit(STRIPE_DELAYED, &sh->state);
2501 set_bit(STRIPE_HANDLE, &sh->state);
2502 }
2503 }
2504 }
2505 /* now if nothing is locked, and if we have enough data,
2506 * we can start a write request
2507 */
Dan Williamsf38e1212007-01-02 13:52:30 -07002508 /* since handle_stripe can be called at any time we need to handle the
2509 * case where a compute block operation has been submitted and then a
Dan Williamsac6b53b2009-07-14 13:40:19 -07002510 * subsequent call wants to start a write request. raid_run_ops only
2511 * handles the case where compute block and reconstruct are requested
Dan Williamsf38e1212007-01-02 13:52:30 -07002512 * simultaneously. If this is not the case then new writes need to be
2513 * held off until the compute completes.
2514 */
Dan Williams976ea8d2008-06-28 08:32:03 +10002515 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2516 (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2517 !test_bit(STRIPE_BIT_DELAY, &sh->state)))
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07002518 schedule_reconstruction(sh, s, rcw == 0, 0);
Dan Williamsa4456852007-07-09 11:56:43 -07002519}
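/*
 * A worked costing example, assuming a 5-drive RAID-5 (four data disks
 * plus parity) with no blocks cached: a full write to one data block
 * gives rmw = 2 (the old data block plus the old parity) against
 * rcw = 3 (the three untouched data blocks), so read-modify-write wins;
 * a full write to three of the four data blocks gives rmw = 4 against
 * rcw = 1, so reconstruct-write wins.  A device that is not In_sync is
 * charged 2*disks, which effectively rules its path out.
 */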
2520
Dan Williams1fe797e2008-06-28 09:16:30 +10002521static void handle_stripe_dirtying6(raid5_conf_t *conf,
Dan Williamsa4456852007-07-09 11:56:43 -07002522 struct stripe_head *sh, struct stripe_head_state *s,
2523 struct r6_state *r6s, int disks)
2524{
Yuri Tikhonova9b39a72009-08-29 19:13:12 -07002525 int rcw = 0, pd_idx = sh->pd_idx, i;
NeilBrown34e04e82009-03-31 15:10:16 +11002526 int qd_idx = sh->qd_idx;
Yuri Tikhonova9b39a72009-08-29 19:13:12 -07002527
2528 set_bit(STRIPE_HANDLE, &sh->state);
Dan Williamsa4456852007-07-09 11:56:43 -07002529 for (i = disks; i--; ) {
2530 struct r5dev *dev = &sh->dev[i];
Yuri Tikhonova9b39a72009-08-29 19:13:12 -07002531 /* check if we don't have enough data */
2532 if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2533 i != pd_idx && i != qd_idx &&
2534 !test_bit(R5_LOCKED, &dev->flags) &&
2535 !(test_bit(R5_UPTODATE, &dev->flags) ||
2536 test_bit(R5_Wantcompute, &dev->flags))) {
2537 rcw++;
2538 if (!test_bit(R5_Insync, &dev->flags))
2539 continue; /* it's a failed drive */
2540
2541 if (
2542 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2543 pr_debug("Read_old stripe %llu "
2544 "block %d for Reconstruct\n",
2545 (unsigned long long)sh->sector, i);
2546 set_bit(R5_LOCKED, &dev->flags);
2547 set_bit(R5_Wantread, &dev->flags);
2548 s->locked++;
2549 } else {
2550 pr_debug("Request delayed stripe %llu "
2551 "block %d for Reconstruct\n",
2552 (unsigned long long)sh->sector, i);
2553 set_bit(STRIPE_DELAYED, &sh->state);
2554 set_bit(STRIPE_HANDLE, &sh->state);
Dan Williamsa4456852007-07-09 11:56:43 -07002555 }
2556 }
2557 }
Dan Williamsa4456852007-07-09 11:56:43 -07002558 /* now if nothing is locked, and if we have enough data, we can start a
2559 * write request
2560 */
Yuri Tikhonova9b39a72009-08-29 19:13:12 -07002561 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2562 s->locked == 0 && rcw == 0 &&
Dan Williamsa4456852007-07-09 11:56:43 -07002563 !test_bit(STRIPE_BIT_DELAY, &sh->state)) {
Yuri Tikhonova9b39a72009-08-29 19:13:12 -07002564 schedule_reconstruction(sh, s, 1, 0);
Dan Williamsa4456852007-07-09 11:56:43 -07002565 }
2566}
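/*
 * Unlike the RAID-5 path above, no read-modify-write alternative is
 * costed here: with both P and Q needing to be updated, this
 * implementation always uses reconstruct-write, so only rcw is counted.
 */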
2567
2568static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2569 struct stripe_head_state *s, int disks)
2570{
Dan Williamsecc65c92008-06-28 08:31:57 +10002571 struct r5dev *dev = NULL;
Dan Williamse89f8962007-01-02 13:52:31 -07002572
Dan Williamsbd2ab672008-04-10 21:29:27 -07002573 set_bit(STRIPE_HANDLE, &sh->state);
2574
Dan Williamsecc65c92008-06-28 08:31:57 +10002575 switch (sh->check_state) {
2576 case check_state_idle:
2577 /* start a new check operation if there are no failures */
Dan Williamsbd2ab672008-04-10 21:29:27 -07002578 if (s->failed == 0) {
Dan Williamsbd2ab672008-04-10 21:29:27 -07002579 BUG_ON(s->uptodate != disks);
Dan Williamsecc65c92008-06-28 08:31:57 +10002580 sh->check_state = check_state_run;
2581 set_bit(STRIPE_OP_CHECK, &s->ops_request);
Dan Williamsbd2ab672008-04-10 21:29:27 -07002582 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
Dan Williamsbd2ab672008-04-10 21:29:27 -07002583 s->uptodate--;
Dan Williamsecc65c92008-06-28 08:31:57 +10002584 break;
Dan Williamsbd2ab672008-04-10 21:29:27 -07002585 }
Dan Williamsa4456852007-07-09 11:56:43 -07002586 dev = &sh->dev[s->failed_num];
Dan Williamsecc65c92008-06-28 08:31:57 +10002587 /* fall through */
2588 case check_state_compute_result:
2589 sh->check_state = check_state_idle;
2590 if (!dev)
2591 dev = &sh->dev[sh->pd_idx];
2592
2593 /* check that a write has not made the stripe insync */
2594 if (test_bit(STRIPE_INSYNC, &sh->state))
2595 break;
2596
2597 /* either failed parity check, or recovery is happening */
Dan Williamsa4456852007-07-09 11:56:43 -07002598 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2599 BUG_ON(s->uptodate != disks);
2600
2601 set_bit(R5_LOCKED, &dev->flags);
Dan Williamsecc65c92008-06-28 08:31:57 +10002602 s->locked++;
Dan Williamsa4456852007-07-09 11:56:43 -07002603 set_bit(R5_Wantwrite, &dev->flags);
Dan Williams830ea012007-01-02 13:52:31 -07002604
Dan Williamsa4456852007-07-09 11:56:43 -07002605 clear_bit(STRIPE_DEGRADED, &sh->state);
Dan Williamsa4456852007-07-09 11:56:43 -07002606 set_bit(STRIPE_INSYNC, &sh->state);
Dan Williamsecc65c92008-06-28 08:31:57 +10002607 break;
2608 case check_state_run:
2609 break; /* we will be called again upon completion */
2610 case check_state_check_result:
2611 sh->check_state = check_state_idle;
2612
2613 /* if a failure occurred during the check operation, leave
2614 * STRIPE_INSYNC not set and let the stripe be handled again
2615 */
2616 if (s->failed)
2617 break;
2618
2619 /* handle a successful check operation, if parity is correct
2620 * we are done. Otherwise update the mismatch count and repair
2621 * parity if !MD_RECOVERY_CHECK
2622 */
Dan Williamsad283ea2009-08-29 19:09:26 -07002623 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
Dan Williamsecc65c92008-06-28 08:31:57 +10002624 /* parity is correct (on disc,
2625 * not in buffer any more)
2626 */
2627 set_bit(STRIPE_INSYNC, &sh->state);
2628 else {
2629 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2630 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2631 /* don't try to repair!! */
2632 set_bit(STRIPE_INSYNC, &sh->state);
2633 else {
2634 sh->check_state = check_state_compute_run;
Dan Williams976ea8d2008-06-28 08:32:03 +10002635 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
Dan Williamsecc65c92008-06-28 08:31:57 +10002636 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2637 set_bit(R5_Wantcompute,
2638 &sh->dev[sh->pd_idx].flags);
2639 sh->ops.target = sh->pd_idx;
Dan Williamsac6b53b2009-07-14 13:40:19 -07002640 sh->ops.target2 = -1;
Dan Williamsecc65c92008-06-28 08:31:57 +10002641 s->uptodate++;
2642 }
2643 }
2644 break;
2645 case check_state_compute_run:
2646 break;
2647 default:
2648 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2649 __func__, sh->check_state,
2650 (unsigned long long) sh->sector);
2651 BUG();
Dan Williamsa4456852007-07-09 11:56:43 -07002652 }
2653}
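/*
 * For reference, the asynchronous xor-zero-sum that check_state_run
 * kicks off above reduces to: parity is consistent iff the xor of all
 * data blocks equals the parity block (SUM_CHECK_P_RESULT clear), and
 * repair is simply recomputing that xor.  A self-contained synchronous
 * userspace sketch with hypothetical block counts and sizes -- this is
 * not the async_tx API the driver actually uses.
 */
#if 0	/* illustrative sketch only -- never built */
#include <stdio.h>
#include <string.h>

#define NDATA	3
#define BLKSZ	16

/* returns 1 iff the xor of all data blocks equals the parity block */
static int parity_ok(unsigned char data[NDATA][BLKSZ],
		     const unsigned char *parity)
{
	unsigned char acc[BLKSZ];
	int d, i;

	memcpy(acc, parity, BLKSZ);
	for (d = 0; d < NDATA; d++)
		for (i = 0; i < BLKSZ; i++)
			acc[i] ^= data[d][i];
	for (i = 0; i < BLKSZ; i++)
		if (acc[i])
			return 0;	/* mismatch: the repair path runs */
	return 1;
}

/* the repair path recomputes parity from the surviving data blocks */
static void recompute_parity(unsigned char data[NDATA][BLKSZ],
			     unsigned char *parity)
{
	int d, i;

	memset(parity, 0, BLKSZ);
	for (d = 0; d < NDATA; d++)
		for (i = 0; i < BLKSZ; i++)
			parity[i] ^= data[d][i];
}

int main(void)
{
	unsigned char data[NDATA][BLKSZ] = { { 1 }, { 2 }, { 4 } };
	unsigned char parity[BLKSZ] = { 0 };	/* deliberately stale */

	if (!parity_ok(data, parity)) {
		recompute_parity(data, parity);
		printf("repaired, parity now %s\n",
		       parity_ok(data, parity) ? "clean" : "still bad");
	}
	return 0;
}
#endif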
2654
2655
2656static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
Dan Williams36d1c642009-07-14 11:48:22 -07002657 struct stripe_head_state *s,
2658 struct r6_state *r6s, int disks)
Dan Williamsa4456852007-07-09 11:56:43 -07002659{
Dan Williamsa4456852007-07-09 11:56:43 -07002660 int pd_idx = sh->pd_idx;
NeilBrown34e04e82009-03-31 15:10:16 +11002661 int qd_idx = sh->qd_idx;
Dan Williamsd82dfee2009-07-14 13:40:57 -07002662 struct r5dev *dev;
Dan Williamsa4456852007-07-09 11:56:43 -07002663
2664 set_bit(STRIPE_HANDLE, &sh->state);
2665
2666 BUG_ON(s->failed > 2);
Dan Williamsd82dfee2009-07-14 13:40:57 -07002667
Dan Williamsa4456852007-07-09 11:56:43 -07002668 /* Want to check and possibly repair P and Q.
2669 * However there could be one 'failed' device, in which
2670 * case we can only check one of them, possibly using the
2671 * other to generate missing data
2672 */
2673
Dan Williamsd82dfee2009-07-14 13:40:57 -07002674 switch (sh->check_state) {
2675 case check_state_idle:
2676 /* start a new check operation if there are < 2 failures */
Dan Williamsa4456852007-07-09 11:56:43 -07002677 if (s->failed == r6s->q_failed) {
Dan Williamsd82dfee2009-07-14 13:40:57 -07002678 /* The only possible failed device holds Q, so it
Dan Williamsa4456852007-07-09 11:56:43 -07002679		 * makes sense to check P (if anything else had failed,
2680 * we would have used P to recreate it).
2681 */
Dan Williamsd82dfee2009-07-14 13:40:57 -07002682 sh->check_state = check_state_run;
Dan Williamsa4456852007-07-09 11:56:43 -07002683 }
2684 if (!r6s->q_failed && s->failed < 2) {
Dan Williamsd82dfee2009-07-14 13:40:57 -07002685 /* Q is not failed, and we didn't use it to generate
Dan Williamsa4456852007-07-09 11:56:43 -07002686 * anything, so it makes sense to check it
2687 */
Dan Williamsd82dfee2009-07-14 13:40:57 -07002688 if (sh->check_state == check_state_run)
2689 sh->check_state = check_state_run_pq;
2690 else
2691 sh->check_state = check_state_run_q;
Dan Williamsa4456852007-07-09 11:56:43 -07002692 }
Dan Williams36d1c642009-07-14 11:48:22 -07002693
Dan Williamsd82dfee2009-07-14 13:40:57 -07002694 /* discard potentially stale zero_sum_result */
2695 sh->ops.zero_sum_result = 0;
Dan Williams36d1c642009-07-14 11:48:22 -07002696
Dan Williamsd82dfee2009-07-14 13:40:57 -07002697 if (sh->check_state == check_state_run) {
2698 /* async_xor_zero_sum destroys the contents of P */
2699 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2700 s->uptodate--;
Dan Williamsa4456852007-07-09 11:56:43 -07002701 }
Dan Williamsd82dfee2009-07-14 13:40:57 -07002702 if (sh->check_state >= check_state_run &&
2703 sh->check_state <= check_state_run_pq) {
2704 /* async_syndrome_zero_sum preserves P and Q, so
2705 * no need to mark them !uptodate here
2706 */
2707 set_bit(STRIPE_OP_CHECK, &s->ops_request);
2708 break;
2709 }
Dan Williams36d1c642009-07-14 11:48:22 -07002710
Dan Williamsd82dfee2009-07-14 13:40:57 -07002711 /* we have 2-disk failure */
2712 BUG_ON(s->failed != 2);
2713 /* fall through */
2714 case check_state_compute_result:
2715 sh->check_state = check_state_idle;
Dan Williams36d1c642009-07-14 11:48:22 -07002716
Dan Williamsd82dfee2009-07-14 13:40:57 -07002717 /* check that a write has not made the stripe insync */
2718 if (test_bit(STRIPE_INSYNC, &sh->state))
2719 break;
Dan Williamsa4456852007-07-09 11:56:43 -07002720
2721 /* now write out any block on a failed drive,
Dan Williamsd82dfee2009-07-14 13:40:57 -07002722 * or P or Q if they were recomputed
Dan Williamsa4456852007-07-09 11:56:43 -07002723 */
Dan Williamsd82dfee2009-07-14 13:40:57 -07002724 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
Dan Williamsa4456852007-07-09 11:56:43 -07002725 if (s->failed == 2) {
2726 dev = &sh->dev[r6s->failed_num[1]];
2727 s->locked++;
2728 set_bit(R5_LOCKED, &dev->flags);
2729 set_bit(R5_Wantwrite, &dev->flags);
2730 }
2731 if (s->failed >= 1) {
2732 dev = &sh->dev[r6s->failed_num[0]];
2733 s->locked++;
2734 set_bit(R5_LOCKED, &dev->flags);
2735 set_bit(R5_Wantwrite, &dev->flags);
2736 }
Dan Williamsd82dfee2009-07-14 13:40:57 -07002737 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
Dan Williamsa4456852007-07-09 11:56:43 -07002738 dev = &sh->dev[pd_idx];
2739 s->locked++;
2740 set_bit(R5_LOCKED, &dev->flags);
2741 set_bit(R5_Wantwrite, &dev->flags);
2742 }
Dan Williamsd82dfee2009-07-14 13:40:57 -07002743 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
Dan Williamsa4456852007-07-09 11:56:43 -07002744 dev = &sh->dev[qd_idx];
2745 s->locked++;
2746 set_bit(R5_LOCKED, &dev->flags);
2747 set_bit(R5_Wantwrite, &dev->flags);
2748 }
2749 clear_bit(STRIPE_DEGRADED, &sh->state);
2750
2751 set_bit(STRIPE_INSYNC, &sh->state);
Dan Williamsd82dfee2009-07-14 13:40:57 -07002752 break;
2753 case check_state_run:
2754 case check_state_run_q:
2755 case check_state_run_pq:
2756 break; /* we will be called again upon completion */
2757 case check_state_check_result:
2758 sh->check_state = check_state_idle;
2759
 2760		/* handle a successful check operation: if parity is correct
2761 * we are done. Otherwise update the mismatch count and repair
2762 * parity if !MD_RECOVERY_CHECK
2763 */
2764 if (sh->ops.zero_sum_result == 0) {
2765 /* both parities are correct */
2766 if (!s->failed)
2767 set_bit(STRIPE_INSYNC, &sh->state);
2768 else {
2769 /* in contrast to the raid5 case we can validate
2770 * parity, but still have a failure to write
2771 * back
2772 */
2773 sh->check_state = check_state_compute_result;
 2774			/* Returning at this point means that we may go
 2775			 * off and bring p and/or q uptodate again, so
 2776			 * we make sure to check zero_sum_result again
 2777			 * to verify whether p or q need writeback
2778 */
2779 }
2780 } else {
2781 conf->mddev->resync_mismatches += STRIPE_SECTORS;
2782 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2783 /* don't try to repair!! */
2784 set_bit(STRIPE_INSYNC, &sh->state);
2785 else {
2786 int *target = &sh->ops.target;
2787
2788 sh->ops.target = -1;
2789 sh->ops.target2 = -1;
2790 sh->check_state = check_state_compute_run;
2791 set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2792 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2793 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2794 set_bit(R5_Wantcompute,
2795 &sh->dev[pd_idx].flags);
2796 *target = pd_idx;
2797 target = &sh->ops.target2;
2798 s->uptodate++;
2799 }
2800 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2801 set_bit(R5_Wantcompute,
2802 &sh->dev[qd_idx].flags);
2803 *target = qd_idx;
2804 s->uptodate++;
2805 }
2806 }
2807 }
2808 break;
2809 case check_state_compute_run:
2810 break;
2811 default:
2812 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2813 __func__, sh->check_state,
2814 (unsigned long long) sh->sector);
2815 BUG();
Dan Williamsa4456852007-07-09 11:56:43 -07002816 }
2817}
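/*
 * The raid6 check above distinguishes P (plain xor, as in raid5) from
 * Q, the Reed-Solomon syndrome sum of g^i * D_i over GF(2^8) with
 * generator g = 2.  A sketch of both for a single byte lane, assuming
 * the 0x11d field polynomial; the real code behind linux/raid/pq.h is
 * table-driven and vectorized, and disk ordering is elided here.
 */
#if 0	/* illustrative sketch only -- never built */
#include <stdio.h>

/* multiply by 2 in GF(2^8) modulo the raid6 polynomial 0x11d */
static unsigned char gf_mul2(unsigned char v)
{
	return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
}

int main(void)
{
	unsigned char d[4] = { 0x01, 0x02, 0x03, 0x04 };	/* one byte lane */
	unsigned char p = 0, q = 0;
	int i;

	for (i = 0; i < 4; i++) {
		p ^= d[i];			/* P: running xor */
		q = gf_mul2(q) ^ d[i];		/* Q: Horner's rule */
	}
	printf("P=%02x Q=%02x\n", p, q);
	return 0;
}
#endif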
2818
2819static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
2820 struct r6_state *r6s)
2821{
2822 int i;
2823
2824 /* We have read all the blocks in this stripe and now we need to
2825 * copy some of them into a target stripe for expand.
2826 */
Dan Williamsf0a50d32007-01-02 13:52:31 -07002827 struct dma_async_tx_descriptor *tx = NULL;
Dan Williamsa4456852007-07-09 11:56:43 -07002828 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2829 for (i = 0; i < sh->disks; i++)
NeilBrown34e04e82009-03-31 15:10:16 +11002830 if (i != sh->pd_idx && i != sh->qd_idx) {
NeilBrown911d4ee2009-03-31 14:39:38 +11002831 int dd_idx, j;
Dan Williamsa4456852007-07-09 11:56:43 -07002832 struct stripe_head *sh2;
Dan Williamsa08abd82009-06-03 11:43:59 -07002833 struct async_submit_ctl submit;
Dan Williamsa4456852007-07-09 11:56:43 -07002834
NeilBrown784052e2009-03-31 15:19:07 +11002835 sector_t bn = compute_blocknr(sh, i, 1);
NeilBrown911d4ee2009-03-31 14:39:38 +11002836 sector_t s = raid5_compute_sector(conf, bn, 0,
2837 &dd_idx, NULL);
NeilBrowna8c906c2009-06-09 14:39:59 +10002838 sh2 = get_active_stripe(conf, s, 0, 1, 1);
Dan Williamsa4456852007-07-09 11:56:43 -07002839 if (sh2 == NULL)
2840 /* so far only the early blocks of this stripe
2841 * have been requested. When later blocks
2842 * get requested, we will try again
2843 */
2844 continue;
2845 if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2846 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2847 /* must have already done this block */
2848 release_stripe(sh2);
2849 continue;
2850 }
Dan Williamsf0a50d32007-01-02 13:52:31 -07002851
2852 /* place all the copies on one channel */
Dan Williamsa08abd82009-06-03 11:43:59 -07002853 init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
Dan Williamsf0a50d32007-01-02 13:52:31 -07002854 tx = async_memcpy(sh2->dev[dd_idx].page,
Dan Williams88ba2aa2009-04-09 16:16:18 -07002855 sh->dev[i].page, 0, 0, STRIPE_SIZE,
Dan Williamsa08abd82009-06-03 11:43:59 -07002856 &submit);
Dan Williamsf0a50d32007-01-02 13:52:31 -07002857
Dan Williamsa4456852007-07-09 11:56:43 -07002858 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2859 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2860 for (j = 0; j < conf->raid_disks; j++)
2861 if (j != sh2->pd_idx &&
NeilBrownd0dabf72009-03-31 14:39:38 +11002862 (!r6s || j != sh2->qd_idx) &&
Dan Williamsa4456852007-07-09 11:56:43 -07002863 !test_bit(R5_Expanded, &sh2->dev[j].flags))
2864 break;
2865 if (j == conf->raid_disks) {
2866 set_bit(STRIPE_EXPAND_READY, &sh2->state);
2867 set_bit(STRIPE_HANDLE, &sh2->state);
2868 }
2869 release_stripe(sh2);
Dan Williamsf0a50d32007-01-02 13:52:31 -07002870
Dan Williamsa4456852007-07-09 11:56:43 -07002871 }
NeilBrowna2e08552007-09-11 15:23:36 -07002872 /* done submitting copies, wait for them to complete */
2873 if (tx) {
2874 async_tx_ack(tx);
2875 dma_wait_for_async_tx(tx);
2876 }
Dan Williamsa4456852007-07-09 11:56:43 -07002877}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878
Dan Williams6bfe0b42008-04-30 00:52:32 -07002879
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880/*
2881 * handle_stripe - do things to a stripe.
2882 *
2883 * We lock the stripe and then examine the state of various bits
2884 * to see what needs to be done.
2885 * Possible results:
 2886 * return some read requests which now have data
2887 * return some write requests which are safely on disc
2888 * schedule a read on some buffers
2889 * schedule a write of some buffers
2890 * return confirmation of parity correctness
2891 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892 * buffers are taken off read_list or write_list, and bh_cache buffers
2893 * get BH_Lock set before the stripe lock is released.
2894 *
2895 */
Dan Williamsa4456852007-07-09 11:56:43 -07002896
Dan Williamsdf10cfb2008-07-28 23:10:39 -07002897static bool handle_stripe5(struct stripe_head *sh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898{
2899 raid5_conf_t *conf = sh->raid_conf;
Dan Williamsa4456852007-07-09 11:56:43 -07002900 int disks = sh->disks, i;
2901 struct bio *return_bi = NULL;
2902 struct stripe_head_state s;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903 struct r5dev *dev;
Dan Williams6bfe0b42008-04-30 00:52:32 -07002904 mdk_rdev_t *blocked_rdev = NULL;
Dan Williamse0a115e2008-06-05 22:45:52 -07002905 int prexor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906
Dan Williamsa4456852007-07-09 11:56:43 -07002907 memset(&s, 0, sizeof(s));
Dan Williams600aa102008-06-28 08:32:05 +10002908 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2909 "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state,
2910 atomic_read(&sh->count), sh->pd_idx, sh->check_state,
2911 sh->reconstruct_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
2913 spin_lock(&sh->lock);
2914 clear_bit(STRIPE_HANDLE, &sh->state);
2915 clear_bit(STRIPE_DELAYED, &sh->state);
2916
Dan Williamsa4456852007-07-09 11:56:43 -07002917 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
2918 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2919 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
Dan Williams83de75c2008-06-28 08:31:58 +10002920
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 /* Now to look around and see what can be done */
NeilBrown9910f162006-01-06 00:20:24 -08002922 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 for (i=disks; i--; ) {
2924 mdk_rdev_t *rdev;
Dan Williamsa4456852007-07-09 11:56:43 -07002925 struct r5dev *dev = &sh->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 clear_bit(R5_Insync, &dev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002927
Dan Williamsb5e98d62007-01-02 13:52:31 -07002928 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2929 "written %p\n", i, dev->flags, dev->toread, dev->read,
2930 dev->towrite, dev->written);
2931
2932 /* maybe we can request a biofill operation
2933 *
2934 * new wantfill requests are only permitted while
Dan Williams83de75c2008-06-28 08:31:58 +10002935 * ops_complete_biofill is guaranteed to be inactive
Dan Williamsb5e98d62007-01-02 13:52:31 -07002936 */
2937 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
Dan Williams83de75c2008-06-28 08:31:58 +10002938 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
Dan Williamsb5e98d62007-01-02 13:52:31 -07002939 set_bit(R5_Wantfill, &dev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940
2941 /* now count some things */
Dan Williamsa4456852007-07-09 11:56:43 -07002942 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
2943 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
Dan Williamsf38e1212007-01-02 13:52:30 -07002944 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
Dan Williamsb5e98d62007-01-02 13:52:31 -07002946 if (test_bit(R5_Wantfill, &dev->flags))
2947 s.to_fill++;
2948 else if (dev->toread)
Dan Williamsa4456852007-07-09 11:56:43 -07002949 s.to_read++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950 if (dev->towrite) {
Dan Williamsa4456852007-07-09 11:56:43 -07002951 s.to_write++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 if (!test_bit(R5_OVERWRITE, &dev->flags))
Dan Williamsa4456852007-07-09 11:56:43 -07002953 s.non_overwrite++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954 }
Dan Williamsa4456852007-07-09 11:56:43 -07002955 if (dev->written)
2956 s.written++;
NeilBrown9910f162006-01-06 00:20:24 -08002957 rdev = rcu_dereference(conf->disks[i].rdev);
NeilBrownac4090d2008-08-05 15:54:13 +10002958 if (blocked_rdev == NULL &&
2959 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
Dan Williams6bfe0b42008-04-30 00:52:32 -07002960 blocked_rdev = rdev;
2961 atomic_inc(&rdev->nr_pending);
Dan Williams6bfe0b42008-04-30 00:52:32 -07002962 }
NeilBrownb2d444d2005-11-08 21:39:31 -08002963 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
NeilBrown14f8d262006-01-06 00:20:14 -08002964 /* The ReadError flag will just be confusing now */
NeilBrown4e5314b2005-11-08 21:39:22 -08002965 clear_bit(R5_ReadError, &dev->flags);
2966 clear_bit(R5_ReWrite, &dev->flags);
2967 }
NeilBrownb2d444d2005-11-08 21:39:31 -08002968 if (!rdev || !test_bit(In_sync, &rdev->flags)
NeilBrown4e5314b2005-11-08 21:39:22 -08002969 || test_bit(R5_ReadError, &dev->flags)) {
Dan Williamsa4456852007-07-09 11:56:43 -07002970 s.failed++;
2971 s.failed_num = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 } else
2973 set_bit(R5_Insync, &dev->flags);
2974 }
NeilBrown9910f162006-01-06 00:20:24 -08002975 rcu_read_unlock();
Dan Williamsb5e98d62007-01-02 13:52:31 -07002976
Dan Williams6bfe0b42008-04-30 00:52:32 -07002977 if (unlikely(blocked_rdev)) {
NeilBrownac4090d2008-08-05 15:54:13 +10002978 if (s.syncing || s.expanding || s.expanded ||
2979 s.to_write || s.written) {
2980 set_bit(STRIPE_HANDLE, &sh->state);
2981 goto unlock;
2982 }
2983 /* There is nothing for the blocked_rdev to block */
2984 rdev_dec_pending(blocked_rdev, conf->mddev);
2985 blocked_rdev = NULL;
Dan Williams6bfe0b42008-04-30 00:52:32 -07002986 }
2987
Dan Williams83de75c2008-06-28 08:31:58 +10002988 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
2989 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
2990 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
2991 }
Dan Williamsb5e98d62007-01-02 13:52:31 -07002992
Dan Williams45b42332007-07-09 11:56:43 -07002993 pr_debug("locked=%d uptodate=%d to_read=%d"
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 " to_write=%d failed=%d failed_num=%d\n",
Dan Williamsa4456852007-07-09 11:56:43 -07002995 s.locked, s.uptodate, s.to_read, s.to_write,
2996 s.failed, s.failed_num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997 /* check if the array has lost two devices and, if so, some requests might
2998 * need to be failed
2999 */
Dan Williamsa4456852007-07-09 11:56:43 -07003000 if (s.failed > 1 && s.to_read+s.to_write+s.written)
Dan Williams1fe797e2008-06-28 09:16:30 +10003001 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
Dan Williamsa4456852007-07-09 11:56:43 -07003002 if (s.failed > 1 && s.syncing) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3004 clear_bit(STRIPE_SYNCING, &sh->state);
Dan Williamsa4456852007-07-09 11:56:43 -07003005 s.syncing = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 }
3007
3008 /* might be able to return some write requests if the parity block
3009 * is safe, or on a failed drive
3010 */
3011 dev = &sh->dev[sh->pd_idx];
Dan Williamsa4456852007-07-09 11:56:43 -07003012 if ( s.written &&
3013 ((test_bit(R5_Insync, &dev->flags) &&
3014 !test_bit(R5_LOCKED, &dev->flags) &&
3015 test_bit(R5_UPTODATE, &dev->flags)) ||
3016 (s.failed == 1 && s.failed_num == sh->pd_idx)))
Dan Williams1fe797e2008-06-28 09:16:30 +10003017 handle_stripe_clean_event(conf, sh, disks, &return_bi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018
3019 /* Now we might consider reading some blocks, either to check/generate
3020 * parity, or to satisfy requests
3021 * or to load a block that is being partially written.
3022 */
Dan Williamsa4456852007-07-09 11:56:43 -07003023 if (s.to_read || s.non_overwrite ||
Dan Williams976ea8d2008-06-28 08:32:03 +10003024 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
Dan Williams1fe797e2008-06-28 09:16:30 +10003025 handle_stripe_fill5(sh, &s, disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026
Dan Williamse33129d2007-01-02 13:52:30 -07003027 /* Now we check to see if any write operations have recently
3028 * completed
3029 */
Dan Williamse0a115e2008-06-05 22:45:52 -07003030 prexor = 0;
Dan Williamsd8ee0722008-06-28 08:32:06 +10003031 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
Dan Williamse0a115e2008-06-05 22:45:52 -07003032 prexor = 1;
Dan Williamsd8ee0722008-06-28 08:32:06 +10003033 if (sh->reconstruct_state == reconstruct_state_drain_result ||
3034 sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
Dan Williams600aa102008-06-28 08:32:05 +10003035 sh->reconstruct_state = reconstruct_state_idle;
Dan Williamse33129d2007-01-02 13:52:30 -07003036
3037 /* All the 'written' buffers and the parity block are ready to
3038 * be written back to disk
3039 */
3040 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3041 for (i = disks; i--; ) {
3042 dev = &sh->dev[i];
3043 if (test_bit(R5_LOCKED, &dev->flags) &&
3044 (i == sh->pd_idx || dev->written)) {
3045 pr_debug("Writing block %d\n", i);
3046 set_bit(R5_Wantwrite, &dev->flags);
Dan Williamse0a115e2008-06-05 22:45:52 -07003047 if (prexor)
3048 continue;
Dan Williamse33129d2007-01-02 13:52:30 -07003049 if (!test_bit(R5_Insync, &dev->flags) ||
3050 (i == sh->pd_idx && s.failed == 0))
3051 set_bit(STRIPE_INSYNC, &sh->state);
3052 }
3053 }
3054 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
3055 atomic_dec(&conf->preread_active_stripes);
3056 if (atomic_read(&conf->preread_active_stripes) <
3057 IO_THRESHOLD)
3058 md_wakeup_thread(conf->mddev->thread);
3059 }
3060 }
3061
 3062	/* Now to consider new write requests and what else, if anything,
3063 * should be read. We do not handle new writes when:
3064 * 1/ A 'write' operation (copy+xor) is already in flight.
3065 * 2/ A 'check' operation is in flight, as it may clobber the parity
3066 * block.
3067 */
Dan Williams600aa102008-06-28 08:32:05 +10003068 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
Dan Williams1fe797e2008-06-28 09:16:30 +10003069 handle_stripe_dirtying5(conf, sh, &s, disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003070
3071 /* maybe we need to check and possibly fix the parity for this stripe
Dan Williamse89f8962007-01-02 13:52:31 -07003072 * Any reads will already have been scheduled, so we just see if enough
3073 * data is available. The parity check is held off while parity
3074 * dependent operations are in flight.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075 */
Dan Williamsecc65c92008-06-28 08:31:57 +10003076 if (sh->check_state ||
3077 (s.syncing && s.locked == 0 &&
Dan Williams976ea8d2008-06-28 08:32:03 +10003078 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
Dan Williamsecc65c92008-06-28 08:31:57 +10003079 !test_bit(STRIPE_INSYNC, &sh->state)))
Dan Williamsa4456852007-07-09 11:56:43 -07003080 handle_parity_checks5(conf, sh, &s, disks);
Dan Williamse89f8962007-01-02 13:52:31 -07003081
Dan Williamsa4456852007-07-09 11:56:43 -07003082 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3084 clear_bit(STRIPE_SYNCING, &sh->state);
3085 }
NeilBrown4e5314b2005-11-08 21:39:22 -08003086
3087 /* If the failed drive is just a ReadError, then we might need to progress
3088 * the repair/check process
3089 */
Dan Williamsa4456852007-07-09 11:56:43 -07003090 if (s.failed == 1 && !conf->mddev->ro &&
3091 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags)
3092 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags)
3093 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags)
NeilBrown4e5314b2005-11-08 21:39:22 -08003094 ) {
Dan Williamsa4456852007-07-09 11:56:43 -07003095 dev = &sh->dev[s.failed_num];
NeilBrown4e5314b2005-11-08 21:39:22 -08003096 if (!test_bit(R5_ReWrite, &dev->flags)) {
3097 set_bit(R5_Wantwrite, &dev->flags);
3098 set_bit(R5_ReWrite, &dev->flags);
3099 set_bit(R5_LOCKED, &dev->flags);
Dan Williamsa4456852007-07-09 11:56:43 -07003100 s.locked++;
NeilBrown4e5314b2005-11-08 21:39:22 -08003101 } else {
3102 /* let's read it back */
3103 set_bit(R5_Wantread, &dev->flags);
3104 set_bit(R5_LOCKED, &dev->flags);
Dan Williamsa4456852007-07-09 11:56:43 -07003105 s.locked++;
NeilBrown4e5314b2005-11-08 21:39:22 -08003106 }
3107 }
3108
Dan Williams600aa102008-06-28 08:32:05 +10003109 /* Finish reconstruct operations initiated by the expansion process */
3110 if (sh->reconstruct_state == reconstruct_state_result) {
NeilBrownab69ae12009-03-31 15:26:47 +11003111 struct stripe_head *sh2
NeilBrowna8c906c2009-06-09 14:39:59 +10003112 = get_active_stripe(conf, sh->sector, 1, 1, 1);
NeilBrownab69ae12009-03-31 15:26:47 +11003113 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
 3114			/* sh cannot be written until sh2 has been read,
 3115			 * so arrange for sh to be delayed a little
3116 */
3117 set_bit(STRIPE_DELAYED, &sh->state);
3118 set_bit(STRIPE_HANDLE, &sh->state);
3119 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3120 &sh2->state))
3121 atomic_inc(&conf->preread_active_stripes);
3122 release_stripe(sh2);
3123 goto unlock;
3124 }
3125 if (sh2)
3126 release_stripe(sh2);
3127
Dan Williams600aa102008-06-28 08:32:05 +10003128 sh->reconstruct_state = reconstruct_state_idle;
Dan Williamsf0a50d32007-01-02 13:52:31 -07003129 clear_bit(STRIPE_EXPANDING, &sh->state);
Dan Williams23397882008-07-23 20:05:34 -07003130 for (i = conf->raid_disks; i--; ) {
Dan Williamsf0a50d32007-01-02 13:52:31 -07003131 set_bit(R5_Wantwrite, &sh->dev[i].flags);
Dan Williams23397882008-07-23 20:05:34 -07003132 set_bit(R5_LOCKED, &sh->dev[i].flags);
Neil Brownefe31142008-06-28 08:31:14 +10003133 s.locked++;
Dan Williams23397882008-07-23 20:05:34 -07003134 }
Dan Williamsf0a50d32007-01-02 13:52:31 -07003135 }
3136
3137 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
Dan Williams600aa102008-06-28 08:32:05 +10003138 !sh->reconstruct_state) {
NeilBrownccfcc3c2006-03-27 01:18:09 -08003139 /* Need to write out all blocks after computing parity */
3140 sh->disks = conf->raid_disks;
NeilBrown911d4ee2009-03-31 14:39:38 +11003141 stripe_set_idx(sh->sector, conf, 0, sh);
Yuri Tikhonovc0f7bdd2009-08-29 19:13:12 -07003142 schedule_reconstruction(sh, &s, 1, 1);
Dan Williams600aa102008-06-28 08:32:05 +10003143 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
NeilBrownccfcc3c2006-03-27 01:18:09 -08003144 clear_bit(STRIPE_EXPAND_READY, &sh->state);
NeilBrownf6705572006-03-27 01:18:11 -08003145 atomic_dec(&conf->reshape_stripes);
NeilBrownccfcc3c2006-03-27 01:18:09 -08003146 wake_up(&conf->wait_for_overlap);
3147 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3148 }
3149
Dan Williams0f94e87c2008-01-08 15:32:53 -08003150 if (s.expanding && s.locked == 0 &&
Dan Williams976ea8d2008-06-28 08:32:03 +10003151 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
Dan Williamsa4456852007-07-09 11:56:43 -07003152 handle_stripe_expansion(conf, sh, NULL);
NeilBrownccfcc3c2006-03-27 01:18:09 -08003153
Dan Williams6bfe0b42008-04-30 00:52:32 -07003154 unlock:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 spin_unlock(&sh->lock);
3156
Dan Williams6bfe0b42008-04-30 00:52:32 -07003157 /* wait for this device to become unblocked */
3158 if (unlikely(blocked_rdev))
3159 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3160
Dan Williams600aa102008-06-28 08:32:05 +10003161 if (s.ops_request)
Dan Williamsac6b53b2009-07-14 13:40:19 -07003162 raid_run_ops(sh, s.ops_request);
Dan Williamsd84e0f12007-01-02 13:52:30 -07003163
Dan Williamsc4e5ac02008-06-28 08:31:53 +10003164 ops_run_io(sh, &s);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165
Dan Williamsa4456852007-07-09 11:56:43 -07003166 return_io(return_bi);
Dan Williamsdf10cfb2008-07-28 23:10:39 -07003167
3168 return blocked_rdev == NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169}
3170
Dan Williams36d1c642009-07-14 11:48:22 -07003171static bool handle_stripe6(struct stripe_head *sh)
NeilBrown16a53ec2006-06-26 00:27:38 -07003172{
NeilBrownbff61972009-03-31 14:33:13 +11003173 raid5_conf_t *conf = sh->raid_conf;
NeilBrownf4168852007-02-28 20:11:53 -08003174 int disks = sh->disks;
Dan Williamsa4456852007-07-09 11:56:43 -07003175 struct bio *return_bi = NULL;
NeilBrown34e04e82009-03-31 15:10:16 +11003176 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx;
Dan Williamsa4456852007-07-09 11:56:43 -07003177 struct stripe_head_state s;
3178 struct r6_state r6s;
NeilBrown16a53ec2006-06-26 00:27:38 -07003179 struct r5dev *dev, *pdev, *qdev;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003180 mdk_rdev_t *blocked_rdev = NULL;
NeilBrown16a53ec2006-06-26 00:27:38 -07003181
Dan Williams45b42332007-07-09 11:56:43 -07003182 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003183		 "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
Dan Williamsa4456852007-07-09 11:56:43 -07003184 (unsigned long long)sh->sector, sh->state,
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003185 atomic_read(&sh->count), pd_idx, qd_idx,
3186 sh->check_state, sh->reconstruct_state);
Dan Williamsa4456852007-07-09 11:56:43 -07003187 memset(&s, 0, sizeof(s));
NeilBrown16a53ec2006-06-26 00:27:38 -07003188
3189 spin_lock(&sh->lock);
3190 clear_bit(STRIPE_HANDLE, &sh->state);
3191 clear_bit(STRIPE_DELAYED, &sh->state);
3192
Dan Williamsa4456852007-07-09 11:56:43 -07003193 s.syncing = test_bit(STRIPE_SYNCING, &sh->state);
3194 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
3195 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
NeilBrown16a53ec2006-06-26 00:27:38 -07003196 /* Now to look around and see what can be done */
3197
3198 rcu_read_lock();
3199 for (i=disks; i--; ) {
3200 mdk_rdev_t *rdev;
3201 dev = &sh->dev[i];
3202 clear_bit(R5_Insync, &dev->flags);
3203
Dan Williams45b42332007-07-09 11:56:43 -07003204 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
NeilBrown16a53ec2006-06-26 00:27:38 -07003205 i, dev->flags, dev->toread, dev->towrite, dev->written);
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003206 /* maybe we can reply to a read
3207 *
3208 * new wantfill requests are only permitted while
3209 * ops_complete_biofill is guaranteed to be inactive
3210 */
3211 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3212 !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3213 set_bit(R5_Wantfill, &dev->flags);
NeilBrown16a53ec2006-06-26 00:27:38 -07003214
3215 /* now count some things */
Dan Williamsa4456852007-07-09 11:56:43 -07003216 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++;
3217 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++;
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003218 if (test_bit(R5_Wantcompute, &dev->flags))
3219 BUG_ON(++s.compute > 2);
NeilBrown16a53ec2006-06-26 00:27:38 -07003220
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003221 if (test_bit(R5_Wantfill, &dev->flags)) {
3222 s.to_fill++;
3223 } else if (dev->toread)
Dan Williamsa4456852007-07-09 11:56:43 -07003224 s.to_read++;
NeilBrown16a53ec2006-06-26 00:27:38 -07003225 if (dev->towrite) {
Dan Williamsa4456852007-07-09 11:56:43 -07003226 s.to_write++;
NeilBrown16a53ec2006-06-26 00:27:38 -07003227 if (!test_bit(R5_OVERWRITE, &dev->flags))
Dan Williamsa4456852007-07-09 11:56:43 -07003228 s.non_overwrite++;
NeilBrown16a53ec2006-06-26 00:27:38 -07003229 }
Dan Williamsa4456852007-07-09 11:56:43 -07003230 if (dev->written)
3231 s.written++;
NeilBrown16a53ec2006-06-26 00:27:38 -07003232 rdev = rcu_dereference(conf->disks[i].rdev);
NeilBrownac4090d2008-08-05 15:54:13 +10003233 if (blocked_rdev == NULL &&
3234 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
Dan Williams6bfe0b42008-04-30 00:52:32 -07003235 blocked_rdev = rdev;
3236 atomic_inc(&rdev->nr_pending);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003237 }
NeilBrown16a53ec2006-06-26 00:27:38 -07003238 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
3239 /* The ReadError flag will just be confusing now */
3240 clear_bit(R5_ReadError, &dev->flags);
3241 clear_bit(R5_ReWrite, &dev->flags);
3242 }
3243 if (!rdev || !test_bit(In_sync, &rdev->flags)
3244 || test_bit(R5_ReadError, &dev->flags)) {
Dan Williamsa4456852007-07-09 11:56:43 -07003245 if (s.failed < 2)
3246 r6s.failed_num[s.failed] = i;
3247 s.failed++;
NeilBrown16a53ec2006-06-26 00:27:38 -07003248 } else
3249 set_bit(R5_Insync, &dev->flags);
3250 }
3251 rcu_read_unlock();
Dan Williams6bfe0b42008-04-30 00:52:32 -07003252
3253 if (unlikely(blocked_rdev)) {
NeilBrownac4090d2008-08-05 15:54:13 +10003254 if (s.syncing || s.expanding || s.expanded ||
3255 s.to_write || s.written) {
3256 set_bit(STRIPE_HANDLE, &sh->state);
3257 goto unlock;
3258 }
3259 /* There is nothing for the blocked_rdev to block */
3260 rdev_dec_pending(blocked_rdev, conf->mddev);
3261 blocked_rdev = NULL;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003262 }
NeilBrownac4090d2008-08-05 15:54:13 +10003263
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003264 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3265 set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3266 set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3267 }
3268
Dan Williams45b42332007-07-09 11:56:43 -07003269 pr_debug("locked=%d uptodate=%d to_read=%d"
NeilBrown16a53ec2006-06-26 00:27:38 -07003270 " to_write=%d failed=%d failed_num=%d,%d\n",
Dan Williamsa4456852007-07-09 11:56:43 -07003271 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3272 r6s.failed_num[0], r6s.failed_num[1]);
3273 /* check if the array has lost >2 devices and, if so, some requests
3274 * might need to be failed
NeilBrown16a53ec2006-06-26 00:27:38 -07003275 */
Dan Williamsa4456852007-07-09 11:56:43 -07003276 if (s.failed > 2 && s.to_read+s.to_write+s.written)
Dan Williams1fe797e2008-06-28 09:16:30 +10003277 handle_failed_stripe(conf, sh, &s, disks, &return_bi);
Dan Williamsa4456852007-07-09 11:56:43 -07003278 if (s.failed > 2 && s.syncing) {
NeilBrown16a53ec2006-06-26 00:27:38 -07003279 md_done_sync(conf->mddev, STRIPE_SECTORS,0);
3280 clear_bit(STRIPE_SYNCING, &sh->state);
Dan Williamsa4456852007-07-09 11:56:43 -07003281 s.syncing = 0;
NeilBrown16a53ec2006-06-26 00:27:38 -07003282 }
3283
3284 /*
3285 * might be able to return some write requests if the parity blocks
3286 * are safe, or on a failed drive
3287 */
3288 pdev = &sh->dev[pd_idx];
Dan Williamsa4456852007-07-09 11:56:43 -07003289 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx)
3290 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx);
NeilBrown34e04e82009-03-31 15:10:16 +11003291 qdev = &sh->dev[qd_idx];
3292 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx)
3293 || (s.failed >= 2 && r6s.failed_num[1] == qd_idx);
NeilBrown16a53ec2006-06-26 00:27:38 -07003294
Dan Williamsa4456852007-07-09 11:56:43 -07003295 if ( s.written &&
3296 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
NeilBrown16a53ec2006-06-26 00:27:38 -07003297 && !test_bit(R5_LOCKED, &pdev->flags)
Dan Williamsa4456852007-07-09 11:56:43 -07003298 && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3299 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
NeilBrown16a53ec2006-06-26 00:27:38 -07003300 && !test_bit(R5_LOCKED, &qdev->flags)
Dan Williamsa4456852007-07-09 11:56:43 -07003301 && test_bit(R5_UPTODATE, &qdev->flags)))))
Dan Williams1fe797e2008-06-28 09:16:30 +10003302 handle_stripe_clean_event(conf, sh, disks, &return_bi);
NeilBrown16a53ec2006-06-26 00:27:38 -07003303
3304 /* Now we might consider reading some blocks, either to check/generate
3305 * parity, or to satisfy requests
3306 * or to load a block that is being partially written.
3307 */
Dan Williamsa4456852007-07-09 11:56:43 -07003308 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) ||
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003309 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
Dan Williams1fe797e2008-06-28 09:16:30 +10003310 handle_stripe_fill6(sh, &s, &r6s, disks);
NeilBrown16a53ec2006-06-26 00:27:38 -07003311
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003312 /* Now we check to see if any write operations have recently
3313 * completed
3314 */
3315 if (sh->reconstruct_state == reconstruct_state_drain_result) {
3316 int qd_idx = sh->qd_idx;
3317
3318 sh->reconstruct_state = reconstruct_state_idle;
3319 /* All the 'written' buffers and the parity blocks are ready to
3320 * be written back to disk
3321 */
3322 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3323 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags));
3324 for (i = disks; i--; ) {
3325 dev = &sh->dev[i];
3326 if (test_bit(R5_LOCKED, &dev->flags) &&
3327 (i == sh->pd_idx || i == qd_idx ||
3328 dev->written)) {
3329 pr_debug("Writing block %d\n", i);
3330 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
3331 set_bit(R5_Wantwrite, &dev->flags);
3332 if (!test_bit(R5_Insync, &dev->flags) ||
3333 ((i == sh->pd_idx || i == qd_idx) &&
3334 s.failed == 0))
3335 set_bit(STRIPE_INSYNC, &sh->state);
3336 }
3337 }
3338 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
3339 atomic_dec(&conf->preread_active_stripes);
3340 if (atomic_read(&conf->preread_active_stripes) <
3341 IO_THRESHOLD)
3342 md_wakeup_thread(conf->mddev->thread);
3343 }
3344 }
3345
Yuri Tikhonova9b39a72009-08-29 19:13:12 -07003346	/* Now to consider new write requests and what else, if anything,
3347 * should be read. We do not handle new writes when:
3348 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
3349 * 2/ A 'check' operation is in flight, as it may clobber the parity
3350 * block.
3351 */
3352 if (s.to_write && !sh->reconstruct_state && !sh->check_state)
Dan Williams1fe797e2008-06-28 09:16:30 +10003353 handle_stripe_dirtying6(conf, sh, &s, &r6s, disks);
NeilBrown16a53ec2006-06-26 00:27:38 -07003354
3355 /* maybe we need to check and possibly fix the parity for this stripe
Dan Williamsa4456852007-07-09 11:56:43 -07003356 * Any reads will already have been scheduled, so we just see if enough
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003357 * data is available. The parity check is held off while parity
3358 * dependent operations are in flight.
NeilBrown16a53ec2006-06-26 00:27:38 -07003359 */
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003360 if (sh->check_state ||
3361 (s.syncing && s.locked == 0 &&
3362 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3363 !test_bit(STRIPE_INSYNC, &sh->state)))
Dan Williams36d1c642009-07-14 11:48:22 -07003364 handle_parity_checks6(conf, sh, &s, &r6s, disks);
NeilBrown16a53ec2006-06-26 00:27:38 -07003365
Dan Williamsa4456852007-07-09 11:56:43 -07003366 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
NeilBrown16a53ec2006-06-26 00:27:38 -07003367 md_done_sync(conf->mddev, STRIPE_SECTORS,1);
3368 clear_bit(STRIPE_SYNCING, &sh->state);
3369 }
3370
3371 /* If the failed drives are just a ReadError, then we might need
3372 * to progress the repair/check process
3373 */
Dan Williamsa4456852007-07-09 11:56:43 -07003374 if (s.failed <= 2 && !conf->mddev->ro)
3375 for (i = 0; i < s.failed; i++) {
3376 dev = &sh->dev[r6s.failed_num[i]];
NeilBrown16a53ec2006-06-26 00:27:38 -07003377 if (test_bit(R5_ReadError, &dev->flags)
3378 && !test_bit(R5_LOCKED, &dev->flags)
3379 && test_bit(R5_UPTODATE, &dev->flags)
3380 ) {
3381 if (!test_bit(R5_ReWrite, &dev->flags)) {
3382 set_bit(R5_Wantwrite, &dev->flags);
3383 set_bit(R5_ReWrite, &dev->flags);
3384 set_bit(R5_LOCKED, &dev->flags);
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003385 s.locked++;
NeilBrown16a53ec2006-06-26 00:27:38 -07003386 } else {
3387 /* let's read it back */
3388 set_bit(R5_Wantread, &dev->flags);
3389 set_bit(R5_LOCKED, &dev->flags);
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003390 s.locked++;
NeilBrown16a53ec2006-06-26 00:27:38 -07003391 }
3392 }
3393 }
NeilBrownf4168852007-02-28 20:11:53 -08003394
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003395 /* Finish reconstruct operations initiated by the expansion process */
3396 if (sh->reconstruct_state == reconstruct_state_result) {
3397 sh->reconstruct_state = reconstruct_state_idle;
3398 clear_bit(STRIPE_EXPANDING, &sh->state);
3399 for (i = conf->raid_disks; i--; ) {
3400 set_bit(R5_Wantwrite, &sh->dev[i].flags);
3401 set_bit(R5_LOCKED, &sh->dev[i].flags);
3402 s.locked++;
3403 }
3404 }
3405
3406 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3407 !sh->reconstruct_state) {
NeilBrownab69ae12009-03-31 15:26:47 +11003408 struct stripe_head *sh2
NeilBrowna8c906c2009-06-09 14:39:59 +10003409 = get_active_stripe(conf, sh->sector, 1, 1, 1);
NeilBrownab69ae12009-03-31 15:26:47 +11003410 if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) {
 3411			/* sh cannot be written until sh2 has been read,
 3412			 * so arrange for sh to be delayed a little
3413 */
3414 set_bit(STRIPE_DELAYED, &sh->state);
3415 set_bit(STRIPE_HANDLE, &sh->state);
3416 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3417 &sh2->state))
3418 atomic_inc(&conf->preread_active_stripes);
3419 release_stripe(sh2);
3420 goto unlock;
3421 }
3422 if (sh2)
3423 release_stripe(sh2);
3424
NeilBrownf4168852007-02-28 20:11:53 -08003425 /* Need to write out all blocks after computing P&Q */
3426 sh->disks = conf->raid_disks;
NeilBrown911d4ee2009-03-31 14:39:38 +11003427 stripe_set_idx(sh->sector, conf, 0, sh);
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003428 schedule_reconstruction(sh, &s, 1, 1);
3429 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
NeilBrownf4168852007-02-28 20:11:53 -08003430 clear_bit(STRIPE_EXPAND_READY, &sh->state);
3431 atomic_dec(&conf->reshape_stripes);
3432 wake_up(&conf->wait_for_overlap);
3433 md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3434 }
3435
Dan Williams0f94e87c2008-01-08 15:32:53 -08003436 if (s.expanding && s.locked == 0 &&
Dan Williams976ea8d2008-06-28 08:32:03 +10003437 !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
Dan Williamsa4456852007-07-09 11:56:43 -07003438 handle_stripe_expansion(conf, sh, &r6s);
NeilBrownf4168852007-02-28 20:11:53 -08003439
Dan Williams6bfe0b42008-04-30 00:52:32 -07003440 unlock:
NeilBrown16a53ec2006-06-26 00:27:38 -07003441 spin_unlock(&sh->lock);
3442
Dan Williams6bfe0b42008-04-30 00:52:32 -07003443 /* wait for this device to become unblocked */
3444 if (unlikely(blocked_rdev))
3445 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
3446
Yuri Tikhonov6c0069c2009-08-29 19:13:13 -07003447 if (s.ops_request)
3448 raid_run_ops(sh, s.ops_request);
3449
Dan Williamsf0e43bc2008-06-28 08:31:55 +10003450 ops_run_io(sh, &s);
3451
Dan Williamsa4456852007-07-09 11:56:43 -07003452 return_io(return_bi);
Dan Williamsdf10cfb2008-07-28 23:10:39 -07003453
3454 return blocked_rdev == NULL;
NeilBrown16a53ec2006-06-26 00:27:38 -07003455}
3456
Dan Williamsdf10cfb2008-07-28 23:10:39 -07003457/* returns true if the stripe was handled */
Dan Williams36d1c642009-07-14 11:48:22 -07003458static bool handle_stripe(struct stripe_head *sh)
NeilBrown16a53ec2006-06-26 00:27:38 -07003459{
3460 if (sh->raid_conf->level == 6)
Dan Williams36d1c642009-07-14 11:48:22 -07003461 return handle_stripe6(sh);
NeilBrown16a53ec2006-06-26 00:27:38 -07003462 else
Dan Williamsdf10cfb2008-07-28 23:10:39 -07003463 return handle_stripe5(sh);
NeilBrown16a53ec2006-06-26 00:27:38 -07003464}
3465
Arjan van de Ven858119e2006-01-14 13:20:43 -08003466static void raid5_activate_delayed(raid5_conf_t *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467{
3468 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3469 while (!list_empty(&conf->delayed_list)) {
3470 struct list_head *l = conf->delayed_list.next;
3471 struct stripe_head *sh;
3472 sh = list_entry(l, struct stripe_head, lru);
3473 list_del_init(l);
3474 clear_bit(STRIPE_DELAYED, &sh->state);
3475 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3476 atomic_inc(&conf->preread_active_stripes);
Dan Williams8b3e6cd2008-04-28 02:15:53 -07003477 list_add_tail(&sh->lru, &conf->hold_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478 }
NeilBrown6ed30032008-02-06 01:40:00 -08003479 } else
3480 blk_plug_device(conf->mddev->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481}
3482
Arjan van de Ven858119e2006-01-14 13:20:43 -08003483static void activate_bit_delay(raid5_conf_t *conf)
NeilBrown72626682005-09-09 16:23:54 -07003484{
3485 /* device_lock is held */
3486 struct list_head head;
3487 list_add(&head, &conf->bitmap_list);
3488 list_del_init(&conf->bitmap_list);
3489 while (!list_empty(&head)) {
3490 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3491 list_del_init(&sh->lru);
3492 atomic_inc(&sh->count);
3493 __release_stripe(conf, sh);
3494 }
3495}
3496
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497static void unplug_slaves(mddev_t *mddev)
3498{
3499 raid5_conf_t *conf = mddev_to_conf(mddev);
3500 int i;
3501
3502 rcu_read_lock();
NeilBrownf001a702009-06-09 14:30:31 +10003503 for (i = 0; i < conf->raid_disks; i++) {
Suzanne Woodd6065f72005-11-08 21:39:27 -08003504 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
NeilBrownb2d444d2005-11-08 21:39:31 -08003505 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
Jens Axboe165125e2007-07-24 09:28:11 +02003506 struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507
3508 atomic_inc(&rdev->nr_pending);
3509 rcu_read_unlock();
3510
Alan D. Brunelle2ad8b1e2007-11-07 14:26:56 -05003511 blk_unplug(r_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512
3513 rdev_dec_pending(rdev, mddev);
3514 rcu_read_lock();
3515 }
3516 }
3517 rcu_read_unlock();
3518}
3519
Jens Axboe165125e2007-07-24 09:28:11 +02003520static void raid5_unplug_device(struct request_queue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521{
3522 mddev_t *mddev = q->queuedata;
3523 raid5_conf_t *conf = mddev_to_conf(mddev);
3524 unsigned long flags;
3525
3526 spin_lock_irqsave(&conf->device_lock, flags);
3527
NeilBrown72626682005-09-09 16:23:54 -07003528 if (blk_remove_plug(q)) {
3529 conf->seq_flush++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003530 raid5_activate_delayed(conf);
NeilBrown72626682005-09-09 16:23:54 -07003531 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532 md_wakeup_thread(mddev->thread);
3533
3534 spin_unlock_irqrestore(&conf->device_lock, flags);
3535
3536 unplug_slaves(mddev);
3537}
3538
NeilBrownf022b2f2006-10-03 01:15:56 -07003539static int raid5_congested(void *data, int bits)
3540{
3541 mddev_t *mddev = data;
3542 raid5_conf_t *conf = mddev_to_conf(mddev);
3543
3544 /* No difference between reads and writes. Just check
3545 * how busy the stripe_cache is
3546 */
3547 if (conf->inactive_blocked)
3548 return 1;
3549 if (conf->quiesce)
3550 return 1;
3551 if (list_empty_careful(&conf->inactive_list))
3552 return 1;
3553
3554 return 0;
3555}
3556
Raz Ben-Jehuda(caro)23032a02006-12-10 02:20:45 -08003557/* We want read requests to align with chunks where possible,
3558 * but write requests don't need to.
3559 */
Alasdair G Kergoncc371e62008-07-03 09:53:43 +02003560static int raid5_mergeable_bvec(struct request_queue *q,
3561 struct bvec_merge_data *bvm,
3562 struct bio_vec *biovec)
Raz Ben-Jehuda(caro)23032a02006-12-10 02:20:45 -08003563{
3564 mddev_t *mddev = q->queuedata;
Alasdair G Kergoncc371e62008-07-03 09:53:43 +02003565 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
Raz Ben-Jehuda(caro)23032a02006-12-10 02:20:45 -08003566 int max;
3567 unsigned int chunk_sectors = mddev->chunk_size >> 9;
Alasdair G Kergoncc371e62008-07-03 09:53:43 +02003568 unsigned int bio_sectors = bvm->bi_size >> 9;
Raz Ben-Jehuda(caro)23032a02006-12-10 02:20:45 -08003569
Alasdair G Kergoncc371e62008-07-03 09:53:43 +02003570 if ((bvm->bi_rw & 1) == WRITE)
Raz Ben-Jehuda(caro)23032a02006-12-10 02:20:45 -08003571 return biovec->bv_len; /* always allow writes to be mergeable */
3572
NeilBrown784052e2009-03-31 15:19:07 +11003573 if (mddev->new_chunk < mddev->chunk_size)
3574 chunk_sectors = mddev->new_chunk >> 9;
Raz Ben-Jehuda(caro)23032a02006-12-10 02:20:45 -08003575 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
3576 if (max < 0) max = 0;
3577 if (max <= biovec->bv_len && bio_sectors == 0)
3578 return biovec->bv_len;
3579 else
3580 return max;
3581}
3582
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003583
3584static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3585{
3586 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3587 unsigned int chunk_sectors = mddev->chunk_size >> 9;
3588 unsigned int bio_sectors = bio->bi_size >> 9;
3589
NeilBrown784052e2009-03-31 15:19:07 +11003590 if (mddev->new_chunk < mddev->chunk_size)
3591 chunk_sectors = mddev->new_chunk >> 9;
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003592 return chunk_sectors >=
3593 ((sector & (chunk_sectors - 1)) + bio_sectors);
3594}
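/*
 * Both helpers above depend on chunk_sectors being a power of two, so
 * sector & (chunk_sectors - 1) is the offset into the current chunk
 * and a bio fits iff that offset plus its length stays inside the
 * chunk.  A worked example with hypothetical numbers:
 */
#if 0	/* illustrative sketch only -- never built */
#include <stdio.h>

int main(void)
{
	unsigned int chunk_sectors = 128;	/* 64KiB chunk, power of two */
	unsigned long long sector = 1000;	/* where the bio starts */
	unsigned int bio_sectors = 24;		/* how long it is */
	unsigned int offset = sector & (chunk_sectors - 1);	/* 1000 mod 128 = 104 */
	int room = (int)chunk_sectors - (int)(offset + bio_sectors);

	/* in_chunk_boundary(): 104 + 24 = 128 <= 128, it just fits */
	printf("fits: %d\n", chunk_sectors >= offset + bio_sectors);

	/* raid5_mergeable_bvec(): bytes left in this chunk, clamped at 0 */
	if (room < 0)
		room = 0;
	printf("room after this bio: %d bytes\n", room << 9);
	return 0;
}
#endif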
3595
3596/*
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003597 * add bio to the retry LIFO (in O(1) ... we are in interrupt)
3598 * later sampled by raid5d.
3599 */
3600static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf)
3601{
3602 unsigned long flags;
3603
3604 spin_lock_irqsave(&conf->device_lock, flags);
3605
3606 bi->bi_next = conf->retry_read_aligned_list;
3607 conf->retry_read_aligned_list = bi;
3608
3609 spin_unlock_irqrestore(&conf->device_lock, flags);
3610 md_wakeup_thread(conf->mddev->thread);
3611}
3612
3613
3614static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3615{
3616 struct bio *bi;
3617
3618 bi = conf->retry_read_aligned;
3619 if (bi) {
3620 conf->retry_read_aligned = NULL;
3621 return bi;
3622 }
3623 bi = conf->retry_read_aligned_list;
 3624	if (bi) {
Neil Brown387bb172007-02-08 14:20:29 -08003625 conf->retry_read_aligned_list = bi->bi_next;
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003626 bi->bi_next = NULL;
Jens Axboe960e7392008-08-15 10:41:18 +02003627 /*
 3628		 * this sets the active stripe count to 1 and the processed
 3629		 * stripe count to zero (upper 8 bits)
3630 */
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003631 bi->bi_phys_segments = 1; /* biased count of active stripes */
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003632 }
3633
3634 return bi;
3635}
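/*
 * While an aligned read sits on the retry list, bi_phys_segments is
 * borrowed as two packed counters rather than keeping its normal
 * meaning.  A sketch of the packing trick, assuming a 16-bit split;
 * the exact split and the accessors are driver implementation details.
 */
#if 0	/* illustrative sketch only -- never built */
#include <stdio.h>

static unsigned int active(unsigned int v)    { return v & 0xffff; }
static unsigned int processed(unsigned int v) { return v >> 16; }

int main(void)
{
	unsigned int seg = 1;		/* biased: one active stripe */

	seg += 2;			/* two more stripes take a reference */
	seg += 1 << 16;			/* one stripe finished its part */
	printf("active=%u processed=%u\n", active(seg), processed(seg));
	return 0;
}
#endif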
3636
3637
3638/*
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003639 * The "raid5_align_endio" should check if the read succeeded and if it
3640 * did, call bio_endio on the original bio (having bio_put the new bio
3641 * first).
 3642 * If the read failed, queue the original bio for a retry via
 3643 * add_bio_to_retry().
3643 */
NeilBrown6712ecf2007-09-27 12:47:43 +02003644static void raid5_align_endio(struct bio *bi, int error)
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003645{
3646 struct bio* raid_bi = bi->bi_private;
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003647 mddev_t *mddev;
3648 raid5_conf_t *conf;
3649 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3650 mdk_rdev_t *rdev;
3651
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003652 bio_put(bi);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003653
3654 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
3655 conf = mddev_to_conf(mddev);
3656 rdev = (void*)raid_bi->bi_next;
3657 raid_bi->bi_next = NULL;
3658
3659 rdev_dec_pending(rdev, conf->mddev);
3660
3661 if (!error && uptodate) {
NeilBrown6712ecf2007-09-27 12:47:43 +02003662 bio_endio(raid_bi, 0);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003663 if (atomic_dec_and_test(&conf->active_aligned_reads))
3664 wake_up(&conf->wait_for_stripe);
NeilBrown6712ecf2007-09-27 12:47:43 +02003665 return;
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003666 }
3667
3668
Dan Williams45b42332007-07-09 11:56:43 -07003669 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003670
3671 add_bio_to_retry(raid_bi, conf);
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003672}
3673
Neil Brown387bb172007-02-08 14:20:29 -08003674static int bio_fits_rdev(struct bio *bi)
3675{
Jens Axboe165125e2007-07-24 09:28:11 +02003676 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
Neil Brown387bb172007-02-08 14:20:29 -08003677
3678 if ((bi->bi_size>>9) > q->max_sectors)
3679 return 0;
3680 blk_recount_segments(q, bi);
Jens Axboe960e7392008-08-15 10:41:18 +02003681 if (bi->bi_phys_segments > q->max_phys_segments)
Neil Brown387bb172007-02-08 14:20:29 -08003682 return 0;
3683
3684 if (q->merge_bvec_fn)
3685 /* it's too hard to apply the merge_bvec_fn at this stage,
 3686		 * just give up
3687 */
3688 return 0;
3689
3690 return 1;
3691}
3692
3693
Jens Axboe165125e2007-07-24 09:28:11 +02003694static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003695{
3696 mddev_t *mddev = q->queuedata;
3697 raid5_conf_t *conf = mddev_to_conf(mddev);
NeilBrown911d4ee2009-03-31 14:39:38 +11003698 unsigned int dd_idx;
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003699 struct bio* align_bi;
3700 mdk_rdev_t *rdev;
3701
3702 if (!in_chunk_boundary(mddev, raid_bio)) {
Dan Williams45b42332007-07-09 11:56:43 -07003703 pr_debug("chunk_aligned_read : non aligned\n");
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003704 return 0;
3705 }
3706 /*
NeilBrown99c0fb52009-03-31 14:39:38 +11003707 * use bio_clone to make a copy of the bio
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003708 */
3709 align_bi = bio_clone(raid_bio, GFP_NOIO);
3710 if (!align_bi)
3711 return 0;
3712 /*
3713 * set bi_end_io to a new function, and set bi_private to the
3714 * original bio.
3715 */
3716 align_bi->bi_end_io = raid5_align_endio;
3717 align_bi->bi_private = raid_bio;
3718 /*
3719 * compute position
3720 */
NeilBrown112bf892009-03-31 14:39:38 +11003721 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
3722 0,
NeilBrown911d4ee2009-03-31 14:39:38 +11003723 &dd_idx, NULL);
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003724
3725 rcu_read_lock();
3726 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3727 if (rdev && test_bit(In_sync, &rdev->flags)) {
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003728 atomic_inc(&rdev->nr_pending);
3729 rcu_read_unlock();
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003730 raid_bio->bi_next = (void*)rdev;
3731 align_bi->bi_bdev = rdev->bdev;
3732 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3733 align_bi->bi_sector += rdev->data_offset;
3734
Neil Brown387bb172007-02-08 14:20:29 -08003735 if (!bio_fits_rdev(align_bi)) {
3736 /* too big in some way */
3737 bio_put(align_bi);
3738 rdev_dec_pending(rdev, mddev);
3739 return 0;
3740 }
3741
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003742 spin_lock_irq(&conf->device_lock);
3743 wait_event_lock_irq(conf->wait_for_stripe,
3744 conf->quiesce == 0,
3745 conf->device_lock, /* nothing */);
3746 atomic_inc(&conf->active_aligned_reads);
3747 spin_unlock_irq(&conf->device_lock);
3748
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003749 generic_make_request(align_bi);
3750 return 1;
3751 } else {
3752 rcu_read_unlock();
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08003753 bio_put(align_bi);
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003754 return 0;
3755 }
3756}
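/*
 * The "compute position" step above relies on an aligned read living
 * inside exactly one chunk, so it maps to a single member disk and the
 * clone can bypass the stripe cache entirely.  A sketch of that
 * mapping for the left-symmetric layout with hypothetical geometry;
 * the driver supports several layouts (see raid5_compute_sector()).
 */
#if 0	/* illustrative sketch only -- never built */
#include <stdio.h>

int main(void)
{
	unsigned int raid_disks = 5, data_disks = 4;	/* one parity disk */
	unsigned int chunk_sectors = 128;
	unsigned long long sector = 5000;

	unsigned long long chunk = sector / chunk_sectors;
	unsigned int offset = sector % chunk_sectors;
	unsigned long long stripe = chunk / data_disks;
	unsigned int dd = chunk % data_disks;		/* data slot in stripe */

	/* left-symmetric: parity rotates backwards, data follows it */
	unsigned int pd_idx = data_disks - (unsigned int)(stripe % raid_disks);
	unsigned int dd_idx = (pd_idx + 1 + dd) % raid_disks;

	printf("logical sector %llu -> disk %u, device sector %llu\n",
	       sector, dd_idx, stripe * chunk_sectors + offset);
	return 0;
}
#endif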
3757
Dan Williams8b3e6cd2008-04-28 02:15:53 -07003758/* __get_priority_stripe - get the next stripe to process
3759 *
3760 * Full stripe writes are allowed to pass preread active stripes up until
3761 * the bypass_threshold is exceeded. In general the bypass_count
3762 * increments when the handle_list is handled before the hold_list; however, it
 3763 * will not be incremented when STRIPE_IO_STARTED is sampled set, signifying a
 3764 * stripe with in-flight i/o. The bypass_count will be reset when the
3765 * head of the hold_list has changed, i.e. the head was promoted to the
3766 * handle_list.
3767 */
3768static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3769{
3770 struct stripe_head *sh;
3771
3772 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3773 __func__,
3774 list_empty(&conf->handle_list) ? "empty" : "busy",
3775 list_empty(&conf->hold_list) ? "empty" : "busy",
3776 atomic_read(&conf->pending_full_writes), conf->bypass_count);
3777
3778 if (!list_empty(&conf->handle_list)) {
3779 sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3780
3781 if (list_empty(&conf->hold_list))
3782 conf->bypass_count = 0;
3783 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3784 if (conf->hold_list.next == conf->last_hold)
3785 conf->bypass_count++;
3786 else {
3787 conf->last_hold = conf->hold_list.next;
3788 conf->bypass_count -= conf->bypass_threshold;
3789 if (conf->bypass_count < 0)
3790 conf->bypass_count = 0;
3791 }
3792 }
3793 } else if (!list_empty(&conf->hold_list) &&
3794 ((conf->bypass_threshold &&
3795 conf->bypass_count > conf->bypass_threshold) ||
3796 atomic_read(&conf->pending_full_writes) == 0)) {
3797 sh = list_entry(conf->hold_list.next,
3798 typeof(*sh), lru);
3799 conf->bypass_count -= conf->bypass_threshold;
3800 if (conf->bypass_count < 0)
3801 conf->bypass_count = 0;
3802 } else
3803 return NULL;
3804
3805 list_del_init(&sh->lru);
3806 atomic_inc(&sh->count);
3807 BUG_ON(atomic_read(&sh->count) != 1);
3808 return sh;
3809}
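/*
 * A compressed model of the scheduling policy documented above: the
 * handle_list is always preferred, but each time it is served while a
 * full-stripe write waits on the hold_list a bypass counter grows, and
 * once it exceeds the threshold the held stripe gets a turn.  This
 * sketch ignores the STRIPE_IO_STARTED sampling and the
 * pending_full_writes fast path; all names below are hypothetical.
 */
#if 0	/* illustrative sketch only -- never built */
#include <stdio.h>

#define DEMO_BYPASS_THRESHOLD	4

/* returns 'h' (handle_list), 'p' (hold_list) or '-' (nothing eligible) */
static char pick(int handle_len, int hold_len, int *bypass_count)
{
	if (handle_len) {
		if (hold_len)
			(*bypass_count)++;	/* hold head passed over again */
		else
			*bypass_count = 0;
		return 'h';
	}
	if (hold_len && *bypass_count > DEMO_BYPASS_THRESHOLD) {
		*bypass_count -= DEMO_BYPASS_THRESHOLD;
		return 'p';			/* the full write earned a turn */
	}
	return '-';
}

int main(void)
{
	int bypass = 0, i;

	/* six ordinary stripes arrive while one full-stripe write waits */
	for (i = 0; i < 6; i++)
		putchar(pick(1, 1, &bypass));
	putchar(pick(0, 1, &bypass));	/* prints: hhhhhhp */
	putchar('\n');
	return 0;
}
#endif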
Raz Ben-Jehuda(caro)f6796232006-12-10 02:20:46 -08003810
Jens Axboe165125e2007-07-24 09:28:11 +02003811static int make_request(struct request_queue *q, struct bio * bi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812{
3813 mddev_t *mddev = q->queuedata;
3814 raid5_conf_t *conf = mddev_to_conf(mddev);
NeilBrown911d4ee2009-03-31 14:39:38 +11003815 int dd_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 sector_t new_sector;
3817 sector_t logical_sector, last_sector;
3818 struct stripe_head *sh;
Jens Axboea3623572005-11-01 09:26:16 +01003819 const int rw = bio_data_dir(bi);
Tejun Heoc9959052008-08-25 19:47:21 +09003820 int cpu, remaining;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821
NeilBrowne5dcdd82005-09-09 16:23:41 -07003822 if (unlikely(bio_barrier(bi))) {
NeilBrown6712ecf2007-09-27 12:47:43 +02003823 bio_endio(bi, -EOPNOTSUPP);
NeilBrowne5dcdd82005-09-09 16:23:41 -07003824 return 0;
3825 }
3826
NeilBrown3d310eb2005-06-21 17:17:26 -07003827 md_write_start(mddev, bi);
NeilBrown06d91a52005-06-21 17:17:12 -07003828
Tejun Heo074a7ac2008-08-25 19:56:14 +09003829 cpu = part_stat_lock();
3830 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
3831 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
3832 bio_sectors(bi));
3833 part_stat_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834
NeilBrown802ba062006-12-13 00:34:13 -08003835 if (rw == READ &&
Raz Ben-Jehuda(caro)52488612006-12-10 02:20:48 -08003836 mddev->reshape_position == MaxSector &&
3837 chunk_aligned_read(q,bi))
NeilBrown99c0fb52009-03-31 14:39:38 +11003838 return 0;
Raz Ben-Jehuda(caro)52488612006-12-10 02:20:48 -08003839
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3841 last_sector = bi->bi_sector + (bi->bi_size>>9);
3842 bi->bi_next = NULL;
3843 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
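	/* Illustrative walk of the loop below (assuming 4KiB pages, so
	 * STRIPE_SECTORS is 8): a 24-sector bio starting at sector 1234 is
	 * processed in four stripe-aligned steps at 1232, 1240, 1248 and
	 * 1256, each attached to its own stripe_head.
	 */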
NeilBrown06d91a52005-06-21 17:17:12 -07003844
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3846 DEFINE_WAIT(w);
NeilBrown16a53ec2006-06-26 00:27:38 -07003847 int disks, data_disks;
NeilBrownb5663ba2009-03-31 14:39:38 +11003848 int previous;
NeilBrownb578d552006-03-27 01:18:12 -08003849
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003850 retry:
NeilBrownb5663ba2009-03-31 14:39:38 +11003851 previous = 0;
NeilBrownb0f9ec02009-03-31 15:27:18 +11003852 disks = conf->raid_disks;
NeilBrownb578d552006-03-27 01:18:12 -08003853 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
NeilBrownb0f9ec02009-03-31 15:27:18 +11003854 if (unlikely(conf->reshape_progress != MaxSector)) {
NeilBrownfef9c612009-03-31 15:16:46 +11003855 /* spinlock is needed as reshape_progress may be
NeilBrowndf8e7f762006-03-27 01:18:15 -08003856 * 64bit on a 32bit platform, and so it might be
3857 * possible to see a half-updated value.
NeilBrownfef9c612009-03-31 15:16:46 +11003858 * Of course reshape_progress could change after
NeilBrowndf8e7f762006-03-27 01:18:15 -08003859 * the lock is dropped, so once we get a reference
3860 * to the stripe that we think it is, we will have
3861 * to check again.
3862 */
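			/* e.g. on a 32-bit host a 64-bit store is two
			 * word-sized writes; an unlocked reader could pair
			 * the old high word with the new low word and
			 * compute a wildly wrong sector.
			 */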
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003863 spin_lock_irq(&conf->device_lock);
NeilBrownfef9c612009-03-31 15:16:46 +11003864 if (mddev->delta_disks < 0
3865 ? logical_sector < conf->reshape_progress
3866 : logical_sector >= conf->reshape_progress) {
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003867 disks = conf->previous_raid_disks;
NeilBrownb5663ba2009-03-31 14:39:38 +11003868 previous = 1;
3869 } else {
NeilBrownfef9c612009-03-31 15:16:46 +11003870 if (mddev->delta_disks < 0
3871 ? logical_sector < conf->reshape_safe
3872 : logical_sector >= conf->reshape_safe) {
NeilBrownb578d552006-03-27 01:18:12 -08003873 spin_unlock_irq(&conf->device_lock);
3874 schedule();
3875 goto retry;
3876 }
3877 }
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003878 spin_unlock_irq(&conf->device_lock);
3879 }
NeilBrown16a53ec2006-06-26 00:27:38 -07003880 data_disks = disks - conf->max_degraded;
3881
NeilBrown112bf892009-03-31 14:39:38 +11003882 new_sector = raid5_compute_sector(conf, logical_sector,
3883 previous,
NeilBrown911d4ee2009-03-31 14:39:38 +11003884 &dd_idx, NULL);
Dan Williams45b42332007-07-09 11:56:43 -07003885 pr_debug("raid5: make_request, sector %llu logical %llu\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886 (unsigned long long)new_sector,
3887 (unsigned long long)logical_sector);
3888
NeilBrownb5663ba2009-03-31 14:39:38 +11003889 sh = get_active_stripe(conf, new_sector, previous,
NeilBrowna8c906c2009-06-09 14:39:59 +10003890 (bi->bi_rw&RWA_MASK), 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 if (sh) {
NeilBrownb0f9ec02009-03-31 15:27:18 +11003892 if (unlikely(previous)) {
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003893 /* expansion might have moved on while waiting for a
NeilBrowndf8e7f762006-03-27 01:18:15 -08003894 * stripe, so we must do the range check again.
3895 * Expansion could still move past after this
3896 * test, but as we are holding a reference to
3897 * 'sh', we know that if that happens,
3898 * STRIPE_EXPANDING will get set and the expansion
3899 * won't proceed until we finish with the stripe.
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003900 */
3901 int must_retry = 0;
3902 spin_lock_irq(&conf->device_lock);
NeilBrownb0f9ec02009-03-31 15:27:18 +11003903 if (mddev->delta_disks < 0
3904 ? logical_sector >= conf->reshape_progress
3905 : logical_sector < conf->reshape_progress)
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003906 /* mismatch, need to try again */
3907 must_retry = 1;
3908 spin_unlock_irq(&conf->device_lock);
3909 if (must_retry) {
3910 release_stripe(sh);
3911 goto retry;
3912 }
3913 }
NeilBrowne464eaf2006-03-27 01:18:14 -08003914 /* FIXME what if we get a false positive because these
3915 * are being updated.
3916 */
3917 if (logical_sector >= mddev->suspend_lo &&
3918 logical_sector < mddev->suspend_hi) {
3919 release_stripe(sh);
3920 schedule();
3921 goto retry;
3922 }
NeilBrown7ecaa1e2006-03-27 01:18:08 -08003923
3924 if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3925 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) {
3926 /* Stripe is busy expanding or
3927 * add failed due to overlap. Flush everything
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 * and wait a while
3929 */
3930 raid5_unplug_device(mddev->queue);
3931 release_stripe(sh);
3932 schedule();
3933 goto retry;
3934 }
3935 finish_wait(&conf->wait_for_overlap, &w);
NeilBrown6ed30032008-02-06 01:40:00 -08003936 set_bit(STRIPE_HANDLE, &sh->state);
3937 clear_bit(STRIPE_DELAYED, &sh->state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 release_stripe(sh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 } else {
3940 /* cannot get stripe for read-ahead, just give-up */
3941 clear_bit(BIO_UPTODATE, &bi->bi_flags);
3942 finish_wait(&conf->wait_for_overlap, &w);
3943 break;
3944 }
3945
3946 }
3947 spin_lock_irq(&conf->device_lock);
Jens Axboe960e7392008-08-15 10:41:18 +02003948 remaining = raid5_dec_bi_phys_segments(bi);
NeilBrownf6344752006-03-27 01:18:17 -08003949 spin_unlock_irq(&conf->device_lock);
3950 if (remaining == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951
NeilBrown16a53ec2006-06-26 00:27:38 -07003952 if ( rw == WRITE )
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953 md_write_end(mddev);
NeilBrown6712ecf2007-09-27 12:47:43 +02003954
Neil Brown0e13fe232008-06-28 08:31:20 +10003955 bio_endio(bi, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957 return 0;
3958}
3959
Dan Williamsb522adc2009-03-31 15:00:31 +11003960static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3961
NeilBrown52c03292006-06-26 00:27:43 -07003962static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963{
NeilBrown52c03292006-06-26 00:27:43 -07003964 /* reshaping is quite different to recovery/resync so it is
3965 * handled quite separately ... here.
3966 *
3967 * On each call to sync_request, we gather one chunk worth of
3968 * destination stripes and flag them as expanding.
3969 * Then we find all the source stripes and request reads.
3970 * As the reads complete, handle_stripe will copy the data
3971 * into the destination stripe and release that stripe.
3972 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
3974 struct stripe_head *sh;
NeilBrownccfcc3c2006-03-27 01:18:09 -08003975 sector_t first_sector, last_sector;
NeilBrownf4168852007-02-28 20:11:53 -08003976 int raid_disks = conf->previous_raid_disks;
3977 int data_disks = raid_disks - conf->max_degraded;
3978 int new_data_disks = conf->raid_disks - conf->max_degraded;
NeilBrown52c03292006-06-26 00:27:43 -07003979 int i;
3980 int dd_idx;
NeilBrownc8f517c2009-03-31 15:28:40 +11003981 sector_t writepos, readpos, safepos;
NeilBrownec32a2b2009-03-31 15:17:38 +11003982 sector_t stripe_addr;
NeilBrown7a661382009-03-31 15:21:40 +11003983 int reshape_sectors;
NeilBrownab69ae12009-03-31 15:26:47 +11003984 struct list_head stripes;
NeilBrown52c03292006-06-26 00:27:43 -07003985
NeilBrownfef9c612009-03-31 15:16:46 +11003986 if (sector_nr == 0) {
3987 /* If restarting in the middle, skip the initial sectors */
3988 if (mddev->delta_disks < 0 &&
3989 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3990 sector_nr = raid5_size(mddev, 0, 0)
3991 - conf->reshape_progress;
3992 } else if (mddev->delta_disks > 0 &&
3993 conf->reshape_progress > 0)
3994 sector_nr = conf->reshape_progress;
NeilBrownf4168852007-02-28 20:11:53 -08003995 sector_div(sector_nr, new_data_disks);
NeilBrownfef9c612009-03-31 15:16:46 +11003996 if (sector_nr) {
3997 *skipped = 1;
3998 return sector_nr;
3999 }
NeilBrown52c03292006-06-26 00:27:43 -07004000 }
4001
NeilBrown7a661382009-03-31 15:21:40 +11004002 /* We need to process a full chunk at a time.
4003 * If old and new chunk sizes differ, we need to process the
4004 * largest of these
4005 */
4006 if (mddev->new_chunk > mddev->chunk_size)
4007 reshape_sectors = mddev->new_chunk / 512;
4008 else
4009 reshape_sectors = mddev->chunk_size / 512;
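	/* e.g. (illustrative): growing from a 64KiB to a 256KiB chunk gives
	 * reshape_sectors = 256KiB / 512 = 512 sectors per pass.
	 */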
4010
NeilBrown52c03292006-06-26 00:27:43 -07004011 /* we update the metadata when there is more than 3Meg
4012 * in the block range (that is rather arbitrary, should
4013 * probably be time based) or when the data about to be
4014 * copied would over-write the source of the data at
4015 * the front of the range.
NeilBrownfef9c612009-03-31 15:16:46 +11004016 * i.e. when the stripe one new_stripe along from reshape_progress maps
4017 * (in the new layout) to after where reshape_safe maps in the old layout.
NeilBrown52c03292006-06-26 00:27:43 -07004018 */
NeilBrownfef9c612009-03-31 15:16:46 +11004019 writepos = conf->reshape_progress;
NeilBrownf4168852007-02-28 20:11:53 -08004020 sector_div(writepos, new_data_disks);
NeilBrownc8f517c2009-03-31 15:28:40 +11004021 readpos = conf->reshape_progress;
4022 sector_div(readpos, data_disks);
NeilBrownfef9c612009-03-31 15:16:46 +11004023 safepos = conf->reshape_safe;
NeilBrownf4168852007-02-28 20:11:53 -08004024 sector_div(safepos, data_disks);
NeilBrownfef9c612009-03-31 15:16:46 +11004025 if (mddev->delta_disks < 0) {
NeilBrowned37d832009-05-27 21:39:05 +10004026 writepos -= min_t(sector_t, reshape_sectors, writepos);
NeilBrownc8f517c2009-03-31 15:28:40 +11004027 readpos += reshape_sectors;
NeilBrown7a661382009-03-31 15:21:40 +11004028 safepos += reshape_sectors;
NeilBrownfef9c612009-03-31 15:16:46 +11004029 } else {
NeilBrown7a661382009-03-31 15:21:40 +11004030 writepos += reshape_sectors;
NeilBrowned37d832009-05-27 21:39:05 +10004031 readpos -= min_t(sector_t, reshape_sectors, readpos);
4032 safepos -= min_t(sector_t, reshape_sectors, safepos);
NeilBrownfef9c612009-03-31 15:16:46 +11004033 }
NeilBrown52c03292006-06-26 00:27:43 -07004034
NeilBrownc8f517c2009-03-31 15:28:40 +11004035 /* 'writepos' is the most advanced device address we might write.
4036 * 'readpos' is the least advanced device address we might read.
4037 * 'safepos' is the least address recorded in the metadata as having
4038 * been reshaped.
4039 * If 'readpos' is behind 'writepos', then there is no way that we can
4040 * ensure safety in the face of a crash - that must be done by userspace
4041 * making a backup of the data. So in that case there is no particular
4042 * rush to update metadata.
4043 * Otherwise if 'safepos' is behind 'writepos', then we really need to
4044 * update the metadata to advance 'safepos' to match 'readpos' so that
4045 * we can be safe in the event of a crash.
4046 * So we insist on updating metadata if safepos is behind writepos and
4047 * readpos is beyond writepos.
4048 * In any case, update the metadata every 10 seconds.
4049 * Maybe that number should be configurable, but I'm not sure it is
4050 * worth it.... maybe it could be a multiple of safemode_delay???
4051 */
NeilBrownfef9c612009-03-31 15:16:46 +11004052 if ((mddev->delta_disks < 0
NeilBrownc8f517c2009-03-31 15:28:40 +11004053 ? (safepos > writepos && readpos < writepos)
4054 : (safepos < writepos && readpos > writepos)) ||
4055 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
NeilBrown52c03292006-06-26 00:27:43 -07004056 /* Cannot proceed until we've updated the superblock... */
4057 wait_event(conf->wait_for_overlap,
4058 atomic_read(&conf->reshape_stripes)==0);
NeilBrownfef9c612009-03-31 15:16:46 +11004059 mddev->reshape_position = conf->reshape_progress;
NeilBrownacb180b2009-04-14 16:28:34 +10004060 mddev->curr_resync_completed = mddev->curr_resync;
NeilBrownc8f517c2009-03-31 15:28:40 +11004061 conf->reshape_checkpoint = jiffies;
NeilBrown850b2b42006-10-03 01:15:46 -07004062 set_bit(MD_CHANGE_DEVS, &mddev->flags);
NeilBrown52c03292006-06-26 00:27:43 -07004063 md_wakeup_thread(mddev->thread);
NeilBrown850b2b42006-10-03 01:15:46 -07004064 wait_event(mddev->sb_wait, mddev->flags == 0 ||
NeilBrown52c03292006-06-26 00:27:43 -07004065 kthread_should_stop());
4066 spin_lock_irq(&conf->device_lock);
NeilBrownfef9c612009-03-31 15:16:46 +11004067 conf->reshape_safe = mddev->reshape_position;
NeilBrown52c03292006-06-26 00:27:43 -07004068 spin_unlock_irq(&conf->device_lock);
4069 wake_up(&conf->wait_for_overlap);
NeilBrownacb180b2009-04-14 16:28:34 +10004070 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
NeilBrown52c03292006-06-26 00:27:43 -07004071 }
4072
NeilBrownec32a2b2009-03-31 15:17:38 +11004073 if (mddev->delta_disks < 0) {
4074 BUG_ON(conf->reshape_progress == 0);
4075 stripe_addr = writepos;
4076 BUG_ON((mddev->dev_sectors &
NeilBrown7a661382009-03-31 15:21:40 +11004077 ~((sector_t)reshape_sectors - 1))
4078 - reshape_sectors - stripe_addr
NeilBrownec32a2b2009-03-31 15:17:38 +11004079 != sector_nr);
4080 } else {
NeilBrown7a661382009-03-31 15:21:40 +11004081 BUG_ON(writepos != sector_nr + reshape_sectors);
NeilBrownec32a2b2009-03-31 15:17:38 +11004082 stripe_addr = sector_nr;
4083 }
NeilBrownab69ae12009-03-31 15:26:47 +11004084 INIT_LIST_HEAD(&stripes);
NeilBrown7a661382009-03-31 15:21:40 +11004085 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
NeilBrown52c03292006-06-26 00:27:43 -07004086 int j;
4087 int skipped = 0;
NeilBrowna8c906c2009-06-09 14:39:59 +10004088 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
NeilBrown52c03292006-06-26 00:27:43 -07004089 set_bit(STRIPE_EXPANDING, &sh->state);
4090 atomic_inc(&conf->reshape_stripes);
4091 /* If any of this stripe is beyond the end of the old
4092 * array, then we need to zero those blocks
4093 */
4094 for (j=sh->disks; j--;) {
4095 sector_t s;
4096 if (j == sh->pd_idx)
4097 continue;
NeilBrownf4168852007-02-28 20:11:53 -08004098 if (conf->level == 6 &&
NeilBrownd0dabf72009-03-31 14:39:38 +11004099 j == sh->qd_idx)
NeilBrownf4168852007-02-28 20:11:53 -08004100 continue;
NeilBrown784052e2009-03-31 15:19:07 +11004101 s = compute_blocknr(sh, j, 0);
Dan Williamsb522adc2009-03-31 15:00:31 +11004102 if (s < raid5_size(mddev, 0, 0)) {
NeilBrown52c03292006-06-26 00:27:43 -07004103 skipped = 1;
4104 continue;
4105 }
4106 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4107 set_bit(R5_Expanded, &sh->dev[j].flags);
4108 set_bit(R5_UPTODATE, &sh->dev[j].flags);
4109 }
4110 if (!skipped) {
4111 set_bit(STRIPE_EXPAND_READY, &sh->state);
4112 set_bit(STRIPE_HANDLE, &sh->state);
4113 }
NeilBrownab69ae12009-03-31 15:26:47 +11004114 list_add(&sh->lru, &stripes);
NeilBrown52c03292006-06-26 00:27:43 -07004115 }
4116 spin_lock_irq(&conf->device_lock);
NeilBrownfef9c612009-03-31 15:16:46 +11004117 if (mddev->delta_disks < 0)
NeilBrown7a661382009-03-31 15:21:40 +11004118 conf->reshape_progress -= reshape_sectors * new_data_disks;
NeilBrownfef9c612009-03-31 15:16:46 +11004119 else
NeilBrown7a661382009-03-31 15:21:40 +11004120 conf->reshape_progress += reshape_sectors * new_data_disks;
NeilBrown52c03292006-06-26 00:27:43 -07004121 spin_unlock_irq(&conf->device_lock);
4122 /* Ok, those stripes are ready. We can start scheduling
4123 * reads on the source stripes.
4124 * The source stripes are determined by mapping the first and last
4125 * block on the destination stripes.
4126 */
NeilBrown52c03292006-06-26 00:27:43 -07004127 first_sector =
NeilBrownec32a2b2009-03-31 15:17:38 +11004128 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
NeilBrown911d4ee2009-03-31 14:39:38 +11004129 1, &dd_idx, NULL);
NeilBrown52c03292006-06-26 00:27:43 -07004130 last_sector =
NeilBrown0e6e0272009-06-09 16:32:22 +10004131 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
NeilBrown112bf892009-03-31 14:39:38 +11004132 *(new_data_disks) - 1),
NeilBrown911d4ee2009-03-31 14:39:38 +11004133 1, &dd_idx, NULL);
Andre Noll58c0fed2009-03-31 14:33:13 +11004134 if (last_sector >= mddev->dev_sectors)
4135 last_sector = mddev->dev_sectors - 1;
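	/* e.g. (illustrative): stripe_addr = 0, reshape_sectors = 512 and
	 * new_data_disks = 4 cover array blocks 0..2047, so first_sector
	 * and last_sector name the old-layout addresses of those two
	 * endpoints.
	 */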
NeilBrown52c03292006-06-26 00:27:43 -07004136 while (first_sector <= last_sector) {
NeilBrowna8c906c2009-06-09 14:39:59 +10004137 sh = get_active_stripe(conf, first_sector, 1, 0, 1);
NeilBrown52c03292006-06-26 00:27:43 -07004138 set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4139 set_bit(STRIPE_HANDLE, &sh->state);
4140 release_stripe(sh);
4141 first_sector += STRIPE_SECTORS;
4142 }
NeilBrownab69ae12009-03-31 15:26:47 +11004143 /* Now that the sources are clearly marked, we can release
4144 * the destination stripes
4145 */
4146 while (!list_empty(&stripes)) {
4147 sh = list_entry(stripes.next, struct stripe_head, lru);
4148 list_del_init(&sh->lru);
4149 release_stripe(sh);
4150 }
NeilBrownc6207272008-02-06 01:39:52 -08004151 /* If this takes us to the resync_max point where we have to pause,
4152 * then we need to write out the superblock.
4153 */
NeilBrown7a661382009-03-31 15:21:40 +11004154 sector_nr += reshape_sectors;
NeilBrownc03f6a12009-04-17 11:06:30 +10004155 if ((sector_nr - mddev->curr_resync_completed) * 2
4156 >= mddev->resync_max - mddev->curr_resync_completed) {
NeilBrownc6207272008-02-06 01:39:52 -08004157 /* Cannot proceed until we've updated the superblock... */
4158 wait_event(conf->wait_for_overlap,
4159 atomic_read(&conf->reshape_stripes) == 0);
NeilBrownfef9c612009-03-31 15:16:46 +11004160 mddev->reshape_position = conf->reshape_progress;
NeilBrownacb180b2009-04-14 16:28:34 +10004161 mddev->curr_resync_completed = mddev->curr_resync;
NeilBrownc8f517c2009-03-31 15:28:40 +11004162 conf->reshape_checkpoint = jiffies;
NeilBrownc6207272008-02-06 01:39:52 -08004163 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4164 md_wakeup_thread(mddev->thread);
4165 wait_event(mddev->sb_wait,
4166 !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4167 || kthread_should_stop());
4168 spin_lock_irq(&conf->device_lock);
NeilBrownfef9c612009-03-31 15:16:46 +11004169 conf->reshape_safe = mddev->reshape_position;
NeilBrownc6207272008-02-06 01:39:52 -08004170 spin_unlock_irq(&conf->device_lock);
4171 wake_up(&conf->wait_for_overlap);
NeilBrownacb180b2009-04-14 16:28:34 +10004172 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
NeilBrownc6207272008-02-06 01:39:52 -08004173 }
NeilBrown7a661382009-03-31 15:21:40 +11004174 return reshape_sectors;
NeilBrown52c03292006-06-26 00:27:43 -07004175}
4176
4177/* FIXME go_faster isn't used */
4178static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4179{
4180 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4181 struct stripe_head *sh;
Andre Noll58c0fed2009-03-31 14:33:13 +11004182 sector_t max_sector = mddev->dev_sectors;
NeilBrown72626682005-09-09 16:23:54 -07004183 int sync_blocks;
NeilBrown16a53ec2006-06-26 00:27:38 -07004184 int still_degraded = 0;
4185 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186
NeilBrown72626682005-09-09 16:23:54 -07004187 if (sector_nr >= max_sector) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 /* just being told to finish up .. nothing much to do */
4189 unplug_slaves(mddev);
NeilBrowncea9c222009-03-31 15:15:05 +11004190
NeilBrown29269552006-03-27 01:18:10 -08004191 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4192 end_reshape(conf);
4193 return 0;
4194 }
NeilBrown72626682005-09-09 16:23:54 -07004195
4196 if (mddev->curr_resync < max_sector) /* aborted */
4197 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4198 &sync_blocks, 1);
NeilBrown16a53ec2006-06-26 00:27:38 -07004199 else /* completed sync */
NeilBrown72626682005-09-09 16:23:54 -07004200 conf->fullsync = 0;
4201 bitmap_close_sync(mddev->bitmap);
4202
Linus Torvalds1da177e2005-04-16 15:20:36 -07004203 return 0;
4204 }
NeilBrownccfcc3c2006-03-27 01:18:09 -08004205
NeilBrown52c03292006-06-26 00:27:43 -07004206 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4207 return reshape_request(mddev, sector_nr, skipped);
NeilBrownf6705572006-03-27 01:18:11 -08004208
NeilBrownc6207272008-02-06 01:39:52 -08004209 /* No need to check resync_max as we never do more than one
4210 * stripe, and as resync_max will always be on a chunk boundary,
4211 * if the check in md_do_sync didn't fire, there is no chance
4212 * of overstepping resync_max here
4213 */
4214
NeilBrown16a53ec2006-06-26 00:27:38 -07004215 /* if there are too many failed drives and we are trying
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216 * to resync, then assert that we are finished, because there is
4217 * nothing we can do.
4218 */
NeilBrown3285edf2006-06-26 00:27:55 -07004219 if (mddev->degraded >= conf->max_degraded &&
NeilBrown16a53ec2006-06-26 00:27:38 -07004220 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Andre Noll58c0fed2009-03-31 14:33:13 +11004221 sector_t rv = mddev->dev_sectors - sector_nr;
NeilBrown57afd892005-06-21 17:17:13 -07004222 *skipped = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223 return rv;
4224 }
NeilBrown72626682005-09-09 16:23:54 -07004225 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
NeilBrown3855ad92005-11-08 21:39:38 -08004226 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
NeilBrown72626682005-09-09 16:23:54 -07004227 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4228 /* we can skip this block, and probably more */
4229 sync_blocks /= STRIPE_SECTORS;
4230 *skipped = 1;
4231 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4232 }
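	/* e.g. (illustrative, 4KiB pages so STRIPE_SECTORS == 8): if the
	 * bitmap reports 1003 clean sectors we skip 125 whole stripes,
	 * i.e. 1000 sectors, keeping sector_nr stripe-aligned.
	 */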
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233
NeilBrownb47490c2008-02-06 01:39:50 -08004234
4235 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4236
NeilBrowna8c906c2009-06-09 14:39:59 +10004237 sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238 if (sh == NULL) {
NeilBrowna8c906c2009-06-09 14:39:59 +10004239 sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240 /* make sure we don't swamp the stripe cache if someone else
NeilBrown16a53ec2006-06-26 00:27:38 -07004241 * is trying to get access
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242 */
Nishanth Aravamudan66c006a2005-11-07 01:01:17 -08004243 schedule_timeout_uninterruptible(1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 }
NeilBrown16a53ec2006-06-26 00:27:38 -07004245 /* Need to check if array will still be degraded after recovery/resync
4246 * We don't need to check the 'failed' flag as when that gets set,
4247 * recovery aborts.
4248 */
NeilBrownf001a702009-06-09 14:30:31 +10004249 for (i = 0; i < conf->raid_disks; i++)
NeilBrown16a53ec2006-06-26 00:27:38 -07004250 if (conf->disks[i].rdev == NULL)
4251 still_degraded = 1;
4252
4253 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4254
4255 spin_lock(&sh->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 set_bit(STRIPE_SYNCING, &sh->state);
4257 clear_bit(STRIPE_INSYNC, &sh->state);
4258 spin_unlock(&sh->lock);
4259
Dan Williamsdf10cfb2008-07-28 23:10:39 -07004260 /* wait for any blocked device to be handled */
Dan Williams36d1c642009-07-14 11:48:22 -07004261 while (unlikely(!handle_stripe(sh)))
Dan Williamsdf10cfb2008-07-28 23:10:39 -07004262 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 release_stripe(sh);
4264
4265 return STRIPE_SECTORS;
4266}
4267
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004268static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4269{
4270 /* We may not be able to submit a whole bio at once as there
4271 * may not be enough stripe_heads available.
4272 * We cannot pre-allocate enough stripe_heads as we may need
4273 * more than exist in the cache (if we allow ever-larger chunks).
4274 * So we do one stripe head at a time and record in
4275 * ->bi_hw_segments how many have been done.
4276 *
4277 * We *know* that this entire raid_bio is in one chunk, so
4278 * there will be only one 'dd_idx' and we need only one call to raid5_compute_sector.
4279 */
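	/* e.g. (illustrative): a 256KiB aligned read spans 64 stripes with
	 * 4KiB pages; if stripe_heads run out after 40, bi_hw_segments
	 * records 40 and the bio is requeued, so the next pass resumes at
	 * stripe 40 rather than restarting.
	 */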
4280 struct stripe_head *sh;
NeilBrown911d4ee2009-03-31 14:39:38 +11004281 int dd_idx;
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004282 sector_t sector, logical_sector, last_sector;
4283 int scnt = 0;
4284 int remaining;
4285 int handled = 0;
4286
4287 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
NeilBrown112bf892009-03-31 14:39:38 +11004288 sector = raid5_compute_sector(conf, logical_sector,
NeilBrown911d4ee2009-03-31 14:39:38 +11004289 0, &dd_idx, NULL);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004290 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4291
4292 for (; logical_sector < last_sector;
Neil Brown387bb172007-02-08 14:20:29 -08004293 logical_sector += STRIPE_SECTORS,
4294 sector += STRIPE_SECTORS,
4295 scnt++) {
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004296
Jens Axboe960e7392008-08-15 10:41:18 +02004297 if (scnt < raid5_bi_hw_segments(raid_bio))
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004298 /* already done this stripe */
4299 continue;
4300
NeilBrowna8c906c2009-06-09 14:39:59 +10004301 sh = get_active_stripe(conf, sector, 0, 1, 0);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004302
4303 if (!sh) {
4304 /* failed to get a stripe - must wait */
Jens Axboe960e7392008-08-15 10:41:18 +02004305 raid5_set_bi_hw_segments(raid_bio, scnt);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004306 conf->retry_read_aligned = raid_bio;
4307 return handled;
4308 }
4309
4310 set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
Neil Brown387bb172007-02-08 14:20:29 -08004311 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4312 release_stripe(sh);
Jens Axboe960e7392008-08-15 10:41:18 +02004313 raid5_set_bi_hw_segments(raid_bio, scnt);
Neil Brown387bb172007-02-08 14:20:29 -08004314 conf->retry_read_aligned = raid_bio;
4315 return handled;
4316 }
4317
Dan Williams36d1c642009-07-14 11:48:22 -07004318 handle_stripe(sh);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004319 release_stripe(sh);
4320 handled++;
4321 }
4322 spin_lock_irq(&conf->device_lock);
Jens Axboe960e7392008-08-15 10:41:18 +02004323 remaining = raid5_dec_bi_phys_segments(raid_bio);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004324 spin_unlock_irq(&conf->device_lock);
Neil Brown0e13fe232008-06-28 08:31:20 +10004325 if (remaining == 0)
4326 bio_endio(raid_bio, 0);
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004327 if (atomic_dec_and_test(&conf->active_aligned_reads))
4328 wake_up(&conf->wait_for_stripe);
4329 return handled;
4330}
4331
Dan Williams07a3b412009-08-29 19:13:13 -07004332#ifdef CONFIG_MULTICORE_RAID456
4333static void __process_stripe(void *param, async_cookie_t cookie)
4334{
4335 struct stripe_head *sh = param;
4336
4337 handle_stripe(sh);
4338 release_stripe(sh);
4339}
4340
4341static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4342{
4343 async_schedule_domain(__process_stripe, sh, domain);
4344}
4345
4346static void synchronize_stripe_processing(struct list_head *domain)
4347{
4348 async_synchronize_full_domain(domain);
4349}
4350#else
4351static void process_stripe(struct stripe_head *sh, struct list_head *domain)
4352{
4353 handle_stripe(sh);
4354 release_stripe(sh);
4355 cond_resched();
4356}
4357
4358static void synchronize_stripe_processing(struct list_head *domain)
4359{
4360}
4361#endif
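/* Summary of the two variants above: with CONFIG_MULTICORE_RAID456 each
 * stripe is pushed into an async_schedule_domain() pool so several
 * stripes can be handled on different CPUs, and raid5d later waits on
 * the whole domain; otherwise the calls collapse to a direct
 * handle_stripe() plus cond_resched().
 */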
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004362
4363
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364/*
4365 * This is our raid5 kernel thread.
4366 *
4367 * We scan the hash table for stripes which can be handled now.
4368 * During the scan, completed stripes are saved for us by the interrupt
4369 * handler, so that they will not have to wait for our next wakeup.
4370 */
NeilBrown6ed30032008-02-06 01:40:00 -08004371static void raid5d(mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372{
4373 struct stripe_head *sh;
4374 raid5_conf_t *conf = mddev_to_conf(mddev);
4375 int handled;
Dan Williams07a3b412009-08-29 19:13:13 -07004376 LIST_HEAD(raid_domain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377
Dan Williams45b42332007-07-09 11:56:43 -07004378 pr_debug("+++ raid5d active\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004379
4380 md_check_recovery(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004381
4382 handled = 0;
4383 spin_lock_irq(&conf->device_lock);
4384 while (1) {
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004385 struct bio *bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386
NeilBrownae3c20c2006-07-10 04:44:17 -07004387 if (conf->seq_flush != conf->seq_write) {
NeilBrown72626682005-09-09 16:23:54 -07004388 int seq = conf->seq_flush;
NeilBrown700e4322005-11-28 13:44:10 -08004389 spin_unlock_irq(&conf->device_lock);
NeilBrown72626682005-09-09 16:23:54 -07004390 bitmap_unplug(mddev->bitmap);
NeilBrown700e4322005-11-28 13:44:10 -08004391 spin_lock_irq(&conf->device_lock);
NeilBrown72626682005-09-09 16:23:54 -07004392 conf->seq_write = seq;
4393 activate_bit_delay(conf);
4394 }
4395
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08004396 while ((bio = remove_bio_from_retry(conf))) {
4397 int ok;
4398 spin_unlock_irq(&conf->device_lock);
4399 ok = retry_aligned_read(conf, bio);
4400 spin_lock_irq(&conf->device_lock);
4401 if (!ok)
4402 break;
4403 handled++;
4404 }
4405
Dan Williams8b3e6cd2008-04-28 02:15:53 -07004406 sh = __get_priority_stripe(conf);
4407
Dan Williamsc9f21aa2008-07-23 12:05:51 -07004408 if (!sh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004409 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410 spin_unlock_irq(&conf->device_lock);
4411
4412 handled++;
Dan Williams07a3b412009-08-29 19:13:13 -07004413 process_stripe(sh, &raid_domain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414
4415 spin_lock_irq(&conf->device_lock);
4416 }
Dan Williams45b42332007-07-09 11:56:43 -07004417 pr_debug("%d stripes handled\n", handled);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418
4419 spin_unlock_irq(&conf->device_lock);
4420
Dan Williams07a3b412009-08-29 19:13:13 -07004421 synchronize_stripe_processing(&raid_domain);
Dan Williamsc9f21aa2008-07-23 12:05:51 -07004422 async_tx_issue_pending_all();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423 unplug_slaves(mddev);
4424
Dan Williams45b42332007-07-09 11:56:43 -07004425 pr_debug("--- raid5d inactive\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426}
4427
NeilBrown3f294f42005-11-08 21:39:25 -08004428static ssize_t
NeilBrown007583c2005-11-08 21:39:30 -08004429raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
NeilBrown3f294f42005-11-08 21:39:25 -08004430{
NeilBrown007583c2005-11-08 21:39:30 -08004431 raid5_conf_t *conf = mddev_to_conf(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08004432 if (conf)
4433 return sprintf(page, "%d\n", conf->max_nr_stripes);
4434 else
4435 return 0;
NeilBrown3f294f42005-11-08 21:39:25 -08004436}
4437
4438static ssize_t
NeilBrown007583c2005-11-08 21:39:30 -08004439raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
NeilBrown3f294f42005-11-08 21:39:25 -08004440{
NeilBrown007583c2005-11-08 21:39:30 -08004441 raid5_conf_t *conf = mddev_to_conf(mddev);
Dan Williams4ef197d82008-04-28 02:15:54 -07004442 unsigned long new;
Dan Williamsb5470dc2008-06-27 21:44:04 -07004443 int err;
4444
NeilBrown3f294f42005-11-08 21:39:25 -08004445 if (len >= PAGE_SIZE)
4446 return -EINVAL;
NeilBrown96de1e62005-11-08 21:39:39 -08004447 if (!conf)
4448 return -ENODEV;
NeilBrown3f294f42005-11-08 21:39:25 -08004449
Dan Williams4ef197d82008-04-28 02:15:54 -07004450 if (strict_strtoul(page, 10, &new))
NeilBrown3f294f42005-11-08 21:39:25 -08004451 return -EINVAL;
4452 if (new <= 16 || new > 32768)
4453 return -EINVAL;
4454 while (new < conf->max_nr_stripes) {
4455 if (drop_one_stripe(conf))
4456 conf->max_nr_stripes--;
4457 else
4458 break;
4459 }
Dan Williamsb5470dc2008-06-27 21:44:04 -07004460 err = md_allow_write(mddev);
4461 if (err)
4462 return err;
NeilBrown3f294f42005-11-08 21:39:25 -08004463 while (new > conf->max_nr_stripes) {
4464 if (grow_one_stripe(conf))
4465 conf->max_nr_stripes++;
4466 else break;
4467 }
4468 return len;
4469}
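/* Illustrative usage from userspace (path per the md sysfs layout):
 *
 *   echo 4096 > /sys/block/md0/md/stripe_cache_size
 *
 * The store handler above shrinks one idle stripe at a time when the
 * new value is lower, and grows one stripe at a time (after
 * md_allow_write()) when it is higher.
 */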
NeilBrown007583c2005-11-08 21:39:30 -08004470
NeilBrown96de1e62005-11-08 21:39:39 -08004471static struct md_sysfs_entry
4472raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4473 raid5_show_stripe_cache_size,
4474 raid5_store_stripe_cache_size);
NeilBrown3f294f42005-11-08 21:39:25 -08004475
4476static ssize_t
Dan Williams8b3e6cd2008-04-28 02:15:53 -07004477raid5_show_preread_threshold(mddev_t *mddev, char *page)
4478{
4479 raid5_conf_t *conf = mddev_to_conf(mddev);
4480 if (conf)
4481 return sprintf(page, "%d\n", conf->bypass_threshold);
4482 else
4483 return 0;
4484}
4485
4486static ssize_t
4487raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4488{
4489 raid5_conf_t *conf = mddev_to_conf(mddev);
Dan Williams4ef197d82008-04-28 02:15:54 -07004490 unsigned long new;
Dan Williams8b3e6cd2008-04-28 02:15:53 -07004491 if (len >= PAGE_SIZE)
4492 return -EINVAL;
4493 if (!conf)
4494 return -ENODEV;
4495
Dan Williams4ef197d82008-04-28 02:15:54 -07004496 if (strict_strtoul(page, 10, &new))
Dan Williams8b3e6cd2008-04-28 02:15:53 -07004497 return -EINVAL;
Dan Williams4ef197d82008-04-28 02:15:54 -07004498 if (new > conf->max_nr_stripes)
Dan Williams8b3e6cd2008-04-28 02:15:53 -07004499 return -EINVAL;
4500 conf->bypass_threshold = new;
4501 return len;
4502}
4503
4504static struct md_sysfs_entry
4505raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4506 S_IRUGO | S_IWUSR,
4507 raid5_show_preread_threshold,
4508 raid5_store_preread_threshold);
4509
4510static ssize_t
NeilBrown96de1e62005-11-08 21:39:39 -08004511stripe_cache_active_show(mddev_t *mddev, char *page)
NeilBrown3f294f42005-11-08 21:39:25 -08004512{
NeilBrown007583c2005-11-08 21:39:30 -08004513 raid5_conf_t *conf = mddev_to_conf(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08004514 if (conf)
4515 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4516 else
4517 return 0;
NeilBrown3f294f42005-11-08 21:39:25 -08004518}
4519
NeilBrown96de1e62005-11-08 21:39:39 -08004520static struct md_sysfs_entry
4521raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
NeilBrown3f294f42005-11-08 21:39:25 -08004522
NeilBrown007583c2005-11-08 21:39:30 -08004523static struct attribute *raid5_attrs[] = {
NeilBrown3f294f42005-11-08 21:39:25 -08004524 &raid5_stripecache_size.attr,
4525 &raid5_stripecache_active.attr,
Dan Williams8b3e6cd2008-04-28 02:15:53 -07004526 &raid5_preread_bypass_threshold.attr,
NeilBrown3f294f42005-11-08 21:39:25 -08004527 NULL,
4528};
NeilBrown007583c2005-11-08 21:39:30 -08004529static struct attribute_group raid5_attrs_group = {
4530 .name = NULL,
4531 .attrs = raid5_attrs,
NeilBrown3f294f42005-11-08 21:39:25 -08004532};
4533
Dan Williams80c3a6c2009-03-17 18:10:40 -07004534static sector_t
4535raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4536{
4537 raid5_conf_t *conf = mddev_to_conf(mddev);
4538
4539 if (!sectors)
4540 sectors = mddev->dev_sectors;
NeilBrown7ec05472009-03-31 15:10:36 +11004541 if (!raid_disks) {
4542 /* size is defined by the smallest of previous and new size */
4543 if (conf->raid_disks < conf->previous_raid_disks)
4544 raid_disks = conf->raid_disks;
4545 else
4546 raid_disks = conf->previous_raid_disks;
4547 }
Dan Williams80c3a6c2009-03-17 18:10:40 -07004548
4549 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
NeilBrown784052e2009-03-31 15:19:07 +11004550 sectors &= ~((sector_t)mddev->new_chunk/512 - 1);
Dan Williams80c3a6c2009-03-17 18:10:40 -07004551 return sectors * (raid_disks - conf->max_degraded);
4552}
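/* Worked example (illustrative): raid_disks = 5, max_degraded = 1 and
 * a per-device size of 128000 sectors (a whole number of 64KiB chunks)
 * give an array size of 128000 * (5 - 1) = 512000 sectors.
 */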
4553
Dan Williams36d1c642009-07-14 11:48:22 -07004554static void raid5_free_percpu(raid5_conf_t *conf)
4555{
4556 struct raid5_percpu *percpu;
4557 unsigned long cpu;
4558
4559 if (!conf->percpu)
4560 return;
4561
4562 get_online_cpus();
4563 for_each_possible_cpu(cpu) {
4564 percpu = per_cpu_ptr(conf->percpu, cpu);
4565 safe_put_page(percpu->spare_page);
Dan Williamsd6f38f32009-07-14 11:50:52 -07004566 kfree(percpu->scribble);
Dan Williams36d1c642009-07-14 11:48:22 -07004567 }
4568#ifdef CONFIG_HOTPLUG_CPU
4569 unregister_cpu_notifier(&conf->cpu_notify);
4570#endif
4571 put_online_cpus();
4572
4573 free_percpu(conf->percpu);
4574}
4575
Dan Williamsa11034b2009-07-14 11:48:16 -07004576static void free_conf(raid5_conf_t *conf)
4577{
4578 shrink_stripes(conf);
Dan Williams36d1c642009-07-14 11:48:22 -07004579 raid5_free_percpu(conf);
Dan Williamsa11034b2009-07-14 11:48:16 -07004580 kfree(conf->disks);
4581 kfree(conf->stripe_hashtbl);
4582 kfree(conf);
4583}
4584
Dan Williams36d1c642009-07-14 11:48:22 -07004585#ifdef CONFIG_HOTPLUG_CPU
4586static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4587 void *hcpu)
4588{
4589 raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4590 long cpu = (long)hcpu;
4591 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4592
4593 switch (action) {
4594 case CPU_UP_PREPARE:
4595 case CPU_UP_PREPARE_FROZEN:
Dan Williamsd6f38f32009-07-14 11:50:52 -07004596 if (conf->level == 6 && !percpu->spare_page)
Dan Williams36d1c642009-07-14 11:48:22 -07004597 percpu->spare_page = alloc_page(GFP_KERNEL);
Dan Williamsd6f38f32009-07-14 11:50:52 -07004598 if (!percpu->scribble)
4599 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4600
4601 if (!percpu->scribble ||
4602 (conf->level == 6 && !percpu->spare_page)) {
4603 safe_put_page(percpu->spare_page);
4604 kfree(percpu->scribble);
Dan Williams36d1c642009-07-14 11:48:22 -07004605 pr_err("%s: failed memory allocation for cpu%ld\n",
4606 __func__, cpu);
4607 return NOTIFY_BAD;
4608 }
4609 break;
4610 case CPU_DEAD:
4611 case CPU_DEAD_FROZEN:
4612 safe_put_page(percpu->spare_page);
Dan Williamsd6f38f32009-07-14 11:50:52 -07004613 kfree(percpu->scribble);
Dan Williams36d1c642009-07-14 11:48:22 -07004614 percpu->spare_page = NULL;
Dan Williamsd6f38f32009-07-14 11:50:52 -07004615 percpu->scribble = NULL;
Dan Williams36d1c642009-07-14 11:48:22 -07004616 break;
4617 default:
4618 break;
4619 }
4620 return NOTIFY_OK;
4621}
4622#endif
4623
4624static int raid5_alloc_percpu(raid5_conf_t *conf)
4625{
4626 unsigned long cpu;
4627 struct page *spare_page;
4628 struct raid5_percpu *allcpus;
Dan Williamsd6f38f32009-07-14 11:50:52 -07004629 void *scribble;
Dan Williams36d1c642009-07-14 11:48:22 -07004630 int err;
4631
Dan Williams36d1c642009-07-14 11:48:22 -07004632 allcpus = alloc_percpu(struct raid5_percpu);
4633 if (!allcpus)
4634 return -ENOMEM;
4635 conf->percpu = allcpus;
4636
4637 get_online_cpus();
4638 err = 0;
4639 for_each_present_cpu(cpu) {
Dan Williamsd6f38f32009-07-14 11:50:52 -07004640 if (conf->level == 6) {
4641 spare_page = alloc_page(GFP_KERNEL);
4642 if (!spare_page) {
4643 err = -ENOMEM;
4644 break;
4645 }
4646 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4647 }
4648 scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL);
4649 if (!scribble) {
Dan Williams36d1c642009-07-14 11:48:22 -07004650 err = -ENOMEM;
4651 break;
4652 }
Dan Williamsd6f38f32009-07-14 11:50:52 -07004653 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
Dan Williams36d1c642009-07-14 11:48:22 -07004654 }
4655#ifdef CONFIG_HOTPLUG_CPU
4656 conf->cpu_notify.notifier_call = raid456_cpu_notify;
4657 conf->cpu_notify.priority = 0;
4658 if (err == 0)
4659 err = register_cpu_notifier(&conf->cpu_notify);
4660#endif
4661 put_online_cpus();
4662
4663 return err;
4664}
4665
NeilBrown91adb562009-03-31 14:39:39 +11004666static raid5_conf_t *setup_conf(mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667{
4668 raid5_conf_t *conf;
4669 int raid_disk, memory;
4670 mdk_rdev_t *rdev;
4671 struct disk_info *disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672
NeilBrown91adb562009-03-31 14:39:39 +11004673 if (mddev->new_level != 5
4674 && mddev->new_level != 4
4675 && mddev->new_level != 6) {
NeilBrown16a53ec2006-06-26 00:27:38 -07004676 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
NeilBrown91adb562009-03-31 14:39:39 +11004677 mdname(mddev), mddev->new_level);
4678 return ERR_PTR(-EIO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679 }
NeilBrown91adb562009-03-31 14:39:39 +11004680 if ((mddev->new_level == 5
4681 && !algorithm_valid_raid5(mddev->new_layout)) ||
4682 (mddev->new_level == 6
4683 && !algorithm_valid_raid6(mddev->new_layout))) {
NeilBrown99c0fb52009-03-31 14:39:38 +11004684 printk(KERN_ERR "raid5: %s: layout %d not supported\n",
NeilBrown91adb562009-03-31 14:39:39 +11004685 mdname(mddev), mddev->new_layout);
4686 return ERR_PTR(-EIO);
4687 }
4688 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4689 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
4690 mdname(mddev), mddev->raid_disks);
4691 return ERR_PTR(-EINVAL);
NeilBrown99c0fb52009-03-31 14:39:38 +11004692 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693
NeilBrown91adb562009-03-31 14:39:39 +11004694 if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) {
4695 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
4696 mddev->new_chunk, mdname(mddev));
4697 return ERR_PTR(-EINVAL);
NeilBrown4bbf3772008-10-13 11:55:12 +11004698 }
4699
NeilBrown91adb562009-03-31 14:39:39 +11004700 conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4701 if (conf == NULL)
4702 goto abort;
4703
4704 conf->raid_disks = mddev->raid_disks;
Dan Williamsd6f38f32009-07-14 11:50:52 -07004705 conf->scribble_len = scribble_len(conf->raid_disks);
NeilBrown91adb562009-03-31 14:39:39 +11004706 if (mddev->reshape_position == MaxSector)
4707 conf->previous_raid_disks = mddev->raid_disks;
4708 else
4709 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4710
4711 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info),
4712 GFP_KERNEL);
4713 if (!conf->disks)
4714 goto abort;
4715
4716 conf->mddev = mddev;
4717
4718 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4719 goto abort;
4720
Dan Williams36d1c642009-07-14 11:48:22 -07004721 conf->level = mddev->new_level;
4722 if (raid5_alloc_percpu(conf) != 0)
4723 goto abort;
4724
NeilBrown91adb562009-03-31 14:39:39 +11004725 spin_lock_init(&conf->device_lock);
4726 init_waitqueue_head(&conf->wait_for_stripe);
4727 init_waitqueue_head(&conf->wait_for_overlap);
4728 INIT_LIST_HEAD(&conf->handle_list);
4729 INIT_LIST_HEAD(&conf->hold_list);
4730 INIT_LIST_HEAD(&conf->delayed_list);
4731 INIT_LIST_HEAD(&conf->bitmap_list);
4732 INIT_LIST_HEAD(&conf->inactive_list);
4733 atomic_set(&conf->active_stripes, 0);
4734 atomic_set(&conf->preread_active_stripes, 0);
4735 atomic_set(&conf->active_aligned_reads, 0);
4736 conf->bypass_threshold = BYPASS_THRESHOLD;
4737
4738 pr_debug("raid5: run(%s) called.\n", mdname(mddev));
4739
4740 list_for_each_entry(rdev, &mddev->disks, same_set) {
4741 raid_disk = rdev->raid_disk;
4742 if (raid_disk >= conf->raid_disks
4743 || raid_disk < 0)
4744 continue;
4745 disk = conf->disks + raid_disk;
4746
4747 disk->rdev = rdev;
4748
4749 if (test_bit(In_sync, &rdev->flags)) {
4750 char b[BDEVNAME_SIZE];
4751 printk(KERN_INFO "raid5: device %s operational as raid"
4752 " disk %d\n", bdevname(rdev->bdev,b),
4753 raid_disk);
4754 } else
4755 /* Cannot rely on bitmap to complete recovery */
4756 conf->fullsync = 1;
4757 }
4758
4759 conf->chunk_size = mddev->new_chunk;
NeilBrown91adb562009-03-31 14:39:39 +11004760 if (conf->level == 6)
4761 conf->max_degraded = 2;
4762 else
4763 conf->max_degraded = 1;
4764 conf->algorithm = mddev->new_layout;
4765 conf->max_nr_stripes = NR_STRIPES;
NeilBrownfef9c612009-03-31 15:16:46 +11004766 conf->reshape_progress = mddev->reshape_position;
NeilBrowne183eae2009-03-31 15:20:22 +11004767 if (conf->reshape_progress != MaxSector) {
NeilBrown784052e2009-03-31 15:19:07 +11004768 conf->prev_chunk = mddev->chunk_size;
NeilBrowne183eae2009-03-31 15:20:22 +11004769 conf->prev_algo = mddev->layout;
4770 }
NeilBrown91adb562009-03-31 14:39:39 +11004771
4772 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4773 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4774 if (grow_stripes(conf, conf->max_nr_stripes)) {
4775 printk(KERN_ERR
4776 "raid5: couldn't allocate %dkB for buffers\n", memory);
4777 goto abort;
4778 } else
4779 printk(KERN_INFO "raid5: allocated %dkB for %s\n",
4780 memory, mdname(mddev));
4781
4782 conf->thread = md_register_thread(raid5d, mddev, "%s_raid5");
4783 if (!conf->thread) {
4784 printk(KERN_ERR
4785 "raid5: couldn't allocate thread for %s\n",
4786 mdname(mddev));
4787 goto abort;
4788 }
4789
4790 return conf;
4791
4792 abort:
4793 if (conf) {
Dan Williamsa11034b2009-07-14 11:48:16 -07004794 free_conf(conf);
NeilBrown91adb562009-03-31 14:39:39 +11004795 return ERR_PTR(-EIO);
4796 } else
4797 return ERR_PTR(-ENOMEM);
4798}
4799
4800static int run(mddev_t *mddev)
4801{
4802 raid5_conf_t *conf;
4803 int working_disks = 0;
4804 mdk_rdev_t *rdev;
4805
NeilBrownf6705572006-03-27 01:18:11 -08004806 if (mddev->reshape_position != MaxSector) {
4807 /* Check that we can continue the reshape.
4808 * Currently only the number of disks can change; it must
4809 * increase, and we must be past the point where
4810 * a stripe over-writes itself.
4811 */
4812 sector_t here_new, here_old;
4813 int old_disks;
Andre Noll18b00332009-03-31 15:00:56 +11004814 int max_degraded = (mddev->level == 6 ? 2 : 1);
NeilBrownf6705572006-03-27 01:18:11 -08004815
NeilBrown88ce4932009-03-31 15:24:23 +11004816 if (mddev->new_level != mddev->level) {
NeilBrownf4168852007-02-28 20:11:53 -08004817 printk(KERN_ERR "raid5: %s: unsupported reshape "
4818 "required - aborting.\n",
NeilBrownf6705572006-03-27 01:18:11 -08004819 mdname(mddev));
4820 return -EINVAL;
4821 }
NeilBrownf6705572006-03-27 01:18:11 -08004822 old_disks = mddev->raid_disks - mddev->delta_disks;
4823 /* reshape_position must be on a new-stripe boundary, and the stripe
NeilBrownf4168852007-02-28 20:11:53 -08004824 * one further up in the new geometry must map to after this point
4825 * in the old geometry.
NeilBrownf6705572006-03-27 01:18:11 -08004826 */
4827 here_new = mddev->reshape_position;
NeilBrown784052e2009-03-31 15:19:07 +11004828 if (sector_div(here_new, (mddev->new_chunk>>9)*
NeilBrownf4168852007-02-28 20:11:53 -08004829 (mddev->raid_disks - max_degraded))) {
4830 printk(KERN_ERR "raid5: reshape_position not "
4831 "on a stripe boundary\n");
NeilBrownf6705572006-03-27 01:18:11 -08004832 return -EINVAL;
4833 }
4834 /* here_new is the stripe we will write to */
4835 here_old = mddev->reshape_position;
NeilBrownf4168852007-02-28 20:11:53 -08004836 sector_div(here_old, (mddev->chunk_size>>9)*
4837 (old_disks-max_degraded));
4838 /* here_old is the first stripe that we might need to read
4839 * from */
NeilBrownf6705572006-03-27 01:18:11 -08004840 if (here_new >= here_old) {
4841 /* Reading from the same stripe as writing to - bad */
NeilBrownf4168852007-02-28 20:11:53 -08004842 printk(KERN_ERR "raid5: reshape_position too early for "
4843 "auto-recovery - aborting.\n");
NeilBrownf6705572006-03-27 01:18:11 -08004844 return -EINVAL;
4845 }
4846 printk(KERN_INFO "raid5: reshape will continue\n");
4847 /* OK, we should be able to continue; */
NeilBrownf6705572006-03-27 01:18:11 -08004848 } else {
NeilBrown91adb562009-03-31 14:39:39 +11004849 BUG_ON(mddev->level != mddev->new_level);
4850 BUG_ON(mddev->layout != mddev->new_layout);
4851 BUG_ON(mddev->chunk_size != mddev->new_chunk);
4852 BUG_ON(mddev->delta_disks != 0);
NeilBrownf6705572006-03-27 01:18:11 -08004853 }
4854
NeilBrown245f46c2009-03-31 14:39:39 +11004855 if (mddev->private == NULL)
4856 conf = setup_conf(mddev);
4857 else
4858 conf = mddev->private;
4859
NeilBrown91adb562009-03-31 14:39:39 +11004860 if (IS_ERR(conf))
4861 return PTR_ERR(conf);
NeilBrown9ffae0c2006-01-06 00:20:32 -08004862
NeilBrown91adb562009-03-31 14:39:39 +11004863 mddev->thread = conf->thread;
4864 conf->thread = NULL;
4865 mddev->private = conf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866
Linus Torvalds1da177e2005-04-16 15:20:36 -07004867 /*
NeilBrown16a53ec2006-06-26 00:27:38 -07004868 * 0 for a fully functional array, 1 or 2 for a degraded array.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869 */
NeilBrown91adb562009-03-31 14:39:39 +11004870 list_for_each_entry(rdev, &mddev->disks, same_set)
4871 if (rdev->raid_disk >= 0 &&
4872 test_bit(In_sync, &rdev->flags))
4873 working_disks++;
4874
NeilBrown02c2de82006-10-03 01:15:47 -07004875 mddev->degraded = conf->raid_disks - working_disks;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004876
NeilBrown16a53ec2006-06-26 00:27:38 -07004877 if (mddev->degraded > conf->max_degraded) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878 printk(KERN_ERR "raid5: not enough operational devices for %s"
4879 " (%d/%d failed)\n",
NeilBrown02c2de82006-10-03 01:15:47 -07004880 mdname(mddev), mddev->degraded, conf->raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881 goto abort;
4882 }
4883
NeilBrown91adb562009-03-31 14:39:39 +11004884 /* device size must be a multiple of chunk size */
4885 mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1);
4886 mddev->resync_max_sectors = mddev->dev_sectors;
4887
NeilBrown16a53ec2006-06-26 00:27:38 -07004888 if (mddev->degraded > 0 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07004889 mddev->recovery_cp != MaxSector) {
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08004890 if (mddev->ok_start_degraded)
4891 printk(KERN_WARNING
4892 "raid5: starting dirty degraded array: %s"
4893 "- data corruption possible.\n",
4894 mdname(mddev));
4895 else {
4896 printk(KERN_ERR
4897 "raid5: cannot start dirty degraded array for %s\n",
4898 mdname(mddev));
4899 goto abort;
4900 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901 }
4902
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 if (mddev->degraded == 0)
4904 printk("raid5: raid level %d set %s active with %d out of %d"
NeilBrowne183eae2009-03-31 15:20:22 +11004905 " devices, algorithm %d\n", conf->level, mdname(mddev),
4906 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4907 mddev->new_layout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908 else
4909 printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
4910 " out of %d devices, algorithm %d\n", conf->level,
4911 mdname(mddev), mddev->raid_disks - mddev->degraded,
NeilBrowne183eae2009-03-31 15:20:22 +11004912 mddev->raid_disks, mddev->new_layout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913
4914 print_raid5_conf(conf);
4915
NeilBrownfef9c612009-03-31 15:16:46 +11004916 if (conf->reshape_progress != MaxSector) {
NeilBrownf6705572006-03-27 01:18:11 -08004917 printk("...ok start reshape thread\n");
NeilBrownfef9c612009-03-31 15:16:46 +11004918 conf->reshape_safe = conf->reshape_progress;
NeilBrownf6705572006-03-27 01:18:11 -08004919 atomic_set(&conf->reshape_stripes, 0);
4920 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4921 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4922 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4923 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4924 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4925 "%s_reshape");
NeilBrownf6705572006-03-27 01:18:11 -08004926 }
4927
Linus Torvalds1da177e2005-04-16 15:20:36 -07004928 /* read-ahead size must cover two whole stripes, which is
NeilBrown16a53ec2006-06-26 00:27:38 -07004929 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data devices
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930 */
4931 {
NeilBrown16a53ec2006-06-26 00:27:38 -07004932 int data_disks = conf->previous_raid_disks - conf->max_degraded;
4933 int stripe = data_disks *
NeilBrown8932c2e2006-06-26 00:27:36 -07004934 (mddev->chunk_size / PAGE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4936 mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4937 }
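	/* e.g. (illustrative) for the calculation above: 4 data disks with
	 * a 64KiB chunk give stripe = 4 * 16 = 64 pages, so ra_pages is
	 * raised to at least 128 pages (512KiB with 4KiB pages), i.e. two
	 * full stripes of read-ahead.
	 */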
4938
4939 /* Ok, everything is just fine now */
NeilBrown5e55e2f2007-03-26 21:32:14 -08004940 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4941 printk(KERN_WARNING
4942 "raid5: failed to create sysfs attributes for %s\n",
4943 mdname(mddev));
NeilBrown7a5febe2005-05-16 21:53:16 -07004944
NeilBrown91adb562009-03-31 14:39:39 +11004945 mddev->queue->queue_lock = &conf->device_lock;
4946
NeilBrown7a5febe2005-05-16 21:53:16 -07004947 mddev->queue->unplug_fn = raid5_unplug_device;
NeilBrownf022b2f2006-10-03 01:15:56 -07004948 mddev->queue->backing_dev_info.congested_data = mddev;
NeilBrown041ae522007-03-26 21:32:14 -08004949 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
NeilBrownf022b2f2006-10-03 01:15:56 -07004950
Dan Williams1f403622009-03-31 14:59:03 +11004951 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
NeilBrown7a5febe2005-05-16 21:53:16 -07004952
Raz Ben-Jehuda(caro)23032a02006-12-10 02:20:45 -08004953 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
4954
Linus Torvalds1da177e2005-04-16 15:20:36 -07004955 return 0;
4956abort:
NeilBrowne0cf8f02009-03-31 14:39:39 +11004957 md_unregister_thread(mddev->thread);
NeilBrown91adb562009-03-31 14:39:39 +11004958 mddev->thread = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959 if (conf) {
4960 print_raid5_conf(conf);
Dan Williamsa11034b2009-07-14 11:48:16 -07004961 free_conf(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962 }
4963 mddev->private = NULL;
4964 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
4965 return -EIO;
4966}
4967
4968
4969
NeilBrown3f294f42005-11-08 21:39:25 -08004970static int stop(mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004971{
4972 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
4973
4974 md_unregister_thread(mddev->thread);
4975 mddev->thread = NULL;
NeilBrown041ae522007-03-26 21:32:14 -08004976 mddev->queue->backing_dev_info.congested_fn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
NeilBrown007583c2005-11-08 21:39:30 -08004978 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
Dan Williamsa11034b2009-07-14 11:48:16 -07004979 free_conf(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980 mddev->private = NULL;
4981 return 0;
4982}
4983
Dan Williams45b42332007-07-09 11:56:43 -07004984#ifdef DEBUG
NeilBrownd710e132008-10-13 11:55:12 +11004985static void print_sh(struct seq_file *seq, struct stripe_head *sh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986{
4987 int i;
4988
NeilBrown16a53ec2006-06-26 00:27:38 -07004989 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
4990 (unsigned long long)sh->sector, sh->pd_idx, sh->state);
4991 seq_printf(seq, "sh %llu, count %d.\n",
4992 (unsigned long long)sh->sector, atomic_read(&sh->count));
4993 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
NeilBrown7ecaa1e2006-03-27 01:18:08 -08004994 for (i = 0; i < sh->disks; i++) {
NeilBrown16a53ec2006-06-26 00:27:38 -07004995 seq_printf(seq, "(cache%d: %p %ld) ",
4996 i, sh->dev[i].page, sh->dev[i].flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004997 }
NeilBrown16a53ec2006-06-26 00:27:38 -07004998 seq_printf(seq, "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999}
5000
NeilBrownd710e132008-10-13 11:55:12 +11005001static void printall(struct seq_file *seq, raid5_conf_t *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002{
5003 struct stripe_head *sh;
NeilBrownfccddba2006-01-06 00:20:33 -08005004 struct hlist_node *hn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005005 int i;
5006
5007 spin_lock_irq(&conf->device_lock);
5008 for (i = 0; i < NR_HASH; i++) {
NeilBrownfccddba2006-01-06 00:20:33 -08005009 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 if (sh->raid_conf != conf)
5011 continue;
NeilBrown16a53ec2006-06-26 00:27:38 -07005012 print_sh(seq, sh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013 }
5014 }
5015 spin_unlock_irq(&conf->device_lock);
5016}
5017#endif
5018
NeilBrownd710e132008-10-13 11:55:12 +11005019static void status(struct seq_file *seq, mddev_t *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020{
5021 raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
5022 int i;
5023
5024 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
NeilBrown02c2de82006-10-03 01:15:47 -07005025 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 for (i = 0; i < conf->raid_disks; i++)
5027 seq_printf (seq, "%s",
5028 conf->disks[i].rdev &&
NeilBrownb2d444d2005-11-08 21:39:31 -08005029 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030 seq_printf (seq, "]");
Dan Williams45b42332007-07-09 11:56:43 -07005031#ifdef DEBUG
NeilBrown16a53ec2006-06-26 00:27:38 -07005032 seq_printf (seq, "\n");
5033 printall(seq, conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005034#endif
5035}
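/*
 * Editor's note (illustrative output, not from the original source):
 * for a 4-disk raid5 with 64k chunks, algorithm 2 and one failed
 * member, status() above would emit roughly:
 *
 *	level 5, 64k chunk, algorithm 2 [4/3] [UUU_]
 *
 * i.e. [total/working] followed by one 'U' (in-sync) or '_' (missing
 * or failed) per raid disk.
 */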
5036
5037static void print_raid5_conf (raid5_conf_t *conf)
5038{
5039 int i;
5040 struct disk_info *tmp;
5041
5042 printk("RAID5 conf printout:\n");
5043 if (!conf) {
5044 printk("(conf==NULL)\n");
5045 return;
5046 }
NeilBrown02c2de82006-10-03 01:15:47 -07005047 printk(" --- rd:%d wd:%d\n", conf->raid_disks,
5048 conf->raid_disks - conf->mddev->degraded);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049
5050 for (i = 0; i < conf->raid_disks; i++) {
5051 char b[BDEVNAME_SIZE];
5052 tmp = conf->disks + i;
5053 if (tmp->rdev)
5054 printk(" disk %d, o:%d, dev:%s\n",
NeilBrownb2d444d2005-11-08 21:39:31 -08005055 i, !test_bit(Faulty, &tmp->rdev->flags),
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056 bdevname(tmp->rdev->bdev,b));
5057 }
5058}
5059
5060static int raid5_spare_active(mddev_t *mddev)
5061{
5062 int i;
5063 raid5_conf_t *conf = mddev->private;
5064 struct disk_info *tmp;
5065
5066 for (i = 0; i < conf->raid_disks; i++) {
5067 tmp = conf->disks + i;
5068 if (tmp->rdev
NeilBrownb2d444d2005-11-08 21:39:31 -08005069 && !test_bit(Faulty, &tmp->rdev->flags)
NeilBrownc04be0a2006-10-03 01:15:53 -07005070 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5071 unsigned long flags;
5072 spin_lock_irqsave(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005073 mddev->degraded--;
NeilBrownc04be0a2006-10-03 01:15:53 -07005074 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075 }
5076 }
5077 print_raid5_conf(conf);
5078 return 0;
5079}
5080
5081static int raid5_remove_disk(mddev_t *mddev, int number)
5082{
5083 raid5_conf_t *conf = mddev->private;
5084 int err = 0;
5085 mdk_rdev_t *rdev;
5086 struct disk_info *p = conf->disks + number;
5087
5088 print_raid5_conf(conf);
5089 rdev = p->rdev;
5090 if (rdev) {
NeilBrownec32a2b2009-03-31 15:17:38 +11005091 if (number >= conf->raid_disks &&
5092 conf->reshape_progress == MaxSector)
5093 clear_bit(In_sync, &rdev->flags);
5094
NeilBrownb2d444d2005-11-08 21:39:31 -08005095 if (test_bit(In_sync, &rdev->flags) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07005096 atomic_read(&rdev->nr_pending)) {
5097 err = -EBUSY;
5098 goto abort;
5099 }
NeilBrowndfc70642008-05-23 13:04:39 -07005100 /* Only remove non-faulty devices if recovery
5101 * isn't possible.
5102 */
5103 if (!test_bit(Faulty, &rdev->flags) &&
NeilBrownec32a2b2009-03-31 15:17:38 +11005104 mddev->degraded <= conf->max_degraded &&
5105 number < conf->raid_disks) {
NeilBrowndfc70642008-05-23 13:04:39 -07005106 err = -EBUSY;
5107 goto abort;
5108 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109 p->rdev = NULL;
Paul E. McKenneyfbd568a3e2005-05-01 08:59:04 -07005110 synchronize_rcu();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111 if (atomic_read(&rdev->nr_pending)) {
5112 /* lost the race, try later */
5113 err = -EBUSY;
5114 p->rdev = rdev;
5115 }
5116 }
5117abort:
5118
5119 print_raid5_conf(conf);
5120 return err;
5121}
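/*
 * Editor's note on the removal protocol above (a restatement, not
 * original text): p->rdev is cleared before synchronize_rcu(), which
 * waits out any reader still holding the old pointer under RCU; only
 * then is nr_pending re-checked.  A non-zero count means a request
 * raced in before the pointer was cleared, so the pointer is restored
 * and -EBUSY returned for the caller to retry later.
 */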
5122
5123static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
5124{
5125 raid5_conf_t *conf = mddev->private;
Neil Brown199050e2008-06-28 08:31:33 +10005126 int err = -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127 int disk;
5128 struct disk_info *p;
Neil Brown6c2fce22008-06-28 08:31:31 +10005129 int first = 0;
5130 int last = conf->raid_disks - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131
NeilBrown16a53ec2006-06-26 00:27:38 -07005132 if (mddev->degraded > conf->max_degraded)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 /* no point adding a device */
Neil Brown199050e2008-06-28 08:31:33 +10005134 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135
Neil Brown6c2fce22008-06-28 08:31:31 +10005136 if (rdev->raid_disk >= 0)
5137 first = last = rdev->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138
5139 /*
NeilBrown16a53ec2006-06-26 00:27:38 -07005140 * find the disk ... but prefer rdev->saved_raid_disk
5141 * if possible.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005142 */
NeilBrown16a53ec2006-06-26 00:27:38 -07005143 if (rdev->saved_raid_disk >= 0 &&
Neil Brown6c2fce22008-06-28 08:31:31 +10005144 rdev->saved_raid_disk >= first &&
NeilBrown16a53ec2006-06-26 00:27:38 -07005145 conf->disks[rdev->saved_raid_disk].rdev == NULL)
5146 disk = rdev->saved_raid_disk;
5147 else
Neil Brown6c2fce22008-06-28 08:31:31 +10005148 disk = first;
5149 for ( ; disk <= last ; disk++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150 if ((p=conf->disks + disk)->rdev == NULL) {
NeilBrownb2d444d2005-11-08 21:39:31 -08005151 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152 rdev->raid_disk = disk;
Neil Brown199050e2008-06-28 08:31:33 +10005153 err = 0;
NeilBrown72626682005-09-09 16:23:54 -07005154 if (rdev->saved_raid_disk != disk)
5155 conf->fullsync = 1;
Suzanne Woodd6065f72005-11-08 21:39:27 -08005156 rcu_assign_pointer(p->rdev, rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 break;
5158 }
5159 print_raid5_conf(conf);
Neil Brown199050e2008-06-28 08:31:33 +10005160 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161}
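/*
 * Editor's note (restating the slot selection above): a device
 * re-added after a transient failure carries its old slot in
 * rdev->saved_raid_disk.  If that slot is still free it is reused
 * and conf->fullsync stays clear, so recovery can lean on the
 * write-intent bitmap; landing in any other slot sets
 * conf->fullsync = 1 and forces a full resync.
 */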
5162
5163static int raid5_resize(mddev_t *mddev, sector_t sectors)
5164{
5165 /* no resync is happening, and there is enough space
5166 * on all devices, so we can resize.
5167 * We need to make sure resync covers any new space.
5168 * If the array is shrinking we should possibly wait until
5169 * any io in the removed space completes, but it hardly seems
5170 * worth it.
5171 */
5172 sectors &= ~((sector_t)mddev->chunk_size/512 - 1);
Dan Williams1f403622009-03-31 14:59:03 +11005173 md_set_array_sectors(mddev, raid5_size(mddev, sectors,
5174 mddev->raid_disks));
Dan Williamsb522adc2009-03-31 15:00:31 +11005175 if (mddev->array_sectors >
5176 raid5_size(mddev, sectors, mddev->raid_disks))
5177 return -EINVAL;
Andre Nollf233ea52008-07-21 17:05:22 +10005178 set_capacity(mddev->gendisk, mddev->array_sectors);
Linus Torvalds44ce62942007-05-09 18:51:36 -07005179 mddev->changed = 1;
Andre Noll58c0fed2009-03-31 14:33:13 +11005180 if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
5181 mddev->recovery_cp = mddev->dev_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5183 }
Andre Noll58c0fed2009-03-31 14:33:13 +11005184 mddev->dev_sectors = sectors;
NeilBrown4b5c7ae2005-07-27 11:43:28 -07005185 mddev->resync_max_sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186 return 0;
5187}
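/*
 * Editor's worked example for the rounding in raid5_resize() above
 * (numbers are illustrative, not from the source): with a 64k chunk,
 * chunk_size/512 == 128 sectors, so the mask is ~127 and a request
 * for sectors == 1000000 is rounded down to 999936, a whole number
 * of chunks per device.
 */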
5188
NeilBrown63c70c42006-03-27 01:18:13 -08005189static int raid5_check_reshape(mddev_t *mddev)
NeilBrown29269552006-03-27 01:18:10 -08005190{
5191 raid5_conf_t *conf = mddev_to_conf(mddev);
NeilBrown29269552006-03-27 01:18:10 -08005192
NeilBrown88ce4932009-03-31 15:24:23 +11005193 if (mddev->delta_disks == 0 &&
5194 mddev->new_layout == mddev->layout &&
5195 mddev->new_chunk == mddev->chunk_size)
5196 return -EINVAL; /* nothing to do */
NeilBrowndba034e2008-08-05 15:54:13 +10005197 if (mddev->bitmap)
5198 /* Cannot grow a bitmap yet */
5199 return -EBUSY;
NeilBrownec32a2b2009-03-31 15:17:38 +11005200 if (mddev->degraded > conf->max_degraded)
5201 return -EINVAL;
5202 if (mddev->delta_disks < 0) {
5203 /* We might be able to shrink, but the devices must
5204 * be made bigger first.
5205 * For raid6, 4 is the minimum size.
5206	 * Otherwise 2 is the minimum.
5207 */
5208 int min = 2;
5209 if (mddev->level == 6)
5210 min = 4;
5211 if (mddev->raid_disks + mddev->delta_disks < min)
5212 return -EINVAL;
5213 }
NeilBrown29269552006-03-27 01:18:10 -08005214
5215 /* Can only proceed if there are plenty of stripe_heads.
5216	 * We need a minimum of one full stripe, and for sensible progress
5217 * it is best to have about 4 times that.
5218 * If we require 4 times, then the default 256 4K stripe_heads will
5219 * allow for chunk sizes up to 256K, which is probably OK.
5220 * If the chunk size is greater, user-space should request more
5221 * stripe_heads first.
5222 */
NeilBrown63c70c42006-03-27 01:18:13 -08005223 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
5224 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
NeilBrown29269552006-03-27 01:18:10 -08005225 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
NeilBrown784052e2009-03-31 15:19:07 +11005226 (max(mddev->chunk_size, mddev->new_chunk)
5227 / STRIPE_SIZE)*4);
NeilBrown29269552006-03-27 01:18:10 -08005228 return -ENOSPC;
5229 }
5230
NeilBrownec32a2b2009-03-31 15:17:38 +11005231 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
NeilBrown63c70c42006-03-27 01:18:13 -08005232}
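/*
 * Editor's sketch of the stripe_head check above, pulled out into a
 * hypothetical helper (illustrative only, hence the #if 0; the
 * driver performs the equivalent test inline):
 */
#if 0
static int reshape_has_enough_stripes(raid5_conf_t *conf, mddev_t *mddev)
{
	/* Four full stripes of the larger chunk must fit in the cache:
	 * with STRIPE_SIZE == 4k and the default 256 stripe_heads,
	 * a 256k chunk needs (256k / 4k) * 4 == 256, exactly the limit.
	 */
	int needed = (max(mddev->chunk_size, mddev->new_chunk)
		      / STRIPE_SIZE) * 4;
	return needed <= conf->max_nr_stripes;
}
#endif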
5233
5234static int raid5_start_reshape(mddev_t *mddev)
5235{
5236 raid5_conf_t *conf = mddev_to_conf(mddev);
5237 mdk_rdev_t *rdev;
NeilBrown63c70c42006-03-27 01:18:13 -08005238 int spares = 0;
5239 int added_devices = 0;
NeilBrownc04be0a2006-10-03 01:15:53 -07005240 unsigned long flags;
NeilBrown63c70c42006-03-27 01:18:13 -08005241
NeilBrownf4168852007-02-28 20:11:53 -08005242 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown63c70c42006-03-27 01:18:13 -08005243 return -EBUSY;
5244
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005245 list_for_each_entry(rdev, &mddev->disks, same_set)
NeilBrown29269552006-03-27 01:18:10 -08005246 if (rdev->raid_disk < 0 &&
5247 !test_bit(Faulty, &rdev->flags))
5248 spares++;
NeilBrown63c70c42006-03-27 01:18:13 -08005249
NeilBrownf4168852007-02-28 20:11:53 -08005250 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
NeilBrown29269552006-03-27 01:18:10 -08005251 /* Not enough devices even to make a degraded array
5252 * of that size
5253 */
5254 return -EINVAL;
5255
NeilBrownec32a2b2009-03-31 15:17:38 +11005256	/* Refuse to reduce the size of the array.  Any reduction in
5257	 * array size must be made through explicit setting of the
5258	 * array_size attribute.
5259 */
5260 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
5261 < mddev->array_sectors) {
5262 printk(KERN_ERR "md: %s: array size must be reduced "
5263 "before number of disks\n", mdname(mddev));
5264 return -EINVAL;
5265 }
5266
NeilBrownf6705572006-03-27 01:18:11 -08005267 atomic_set(&conf->reshape_stripes, 0);
NeilBrown29269552006-03-27 01:18:10 -08005268 spin_lock_irq(&conf->device_lock);
5269 conf->previous_raid_disks = conf->raid_disks;
NeilBrown63c70c42006-03-27 01:18:13 -08005270 conf->raid_disks += mddev->delta_disks;
NeilBrown88ce4932009-03-31 15:24:23 +11005271 conf->prev_chunk = conf->chunk_size;
5272 conf->chunk_size = mddev->new_chunk;
5273 conf->prev_algo = conf->algorithm;
5274 conf->algorithm = mddev->new_layout;
NeilBrownfef9c612009-03-31 15:16:46 +11005275 if (mddev->delta_disks < 0)
5276 conf->reshape_progress = raid5_size(mddev, 0, 0);
5277 else
5278 conf->reshape_progress = 0;
5279 conf->reshape_safe = conf->reshape_progress;
NeilBrown86b42c72009-03-31 15:19:03 +11005280 conf->generation++;
NeilBrown29269552006-03-27 01:18:10 -08005281 spin_unlock_irq(&conf->device_lock);
5282
5283 /* Add some new drives, as many as will fit.
5284 * We know there are enough to make the newly sized array work.
5285 */
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005286 list_for_each_entry(rdev, &mddev->disks, same_set)
NeilBrown29269552006-03-27 01:18:10 -08005287 if (rdev->raid_disk < 0 &&
5288 !test_bit(Faulty, &rdev->flags)) {
Neil Brown199050e2008-06-28 08:31:33 +10005289 if (raid5_add_disk(mddev, rdev) == 0) {
NeilBrown29269552006-03-27 01:18:10 -08005290 char nm[20];
5291 set_bit(In_sync, &rdev->flags);
NeilBrown29269552006-03-27 01:18:10 -08005292 added_devices++;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07005293 rdev->recovery_offset = 0;
NeilBrown29269552006-03-27 01:18:10 -08005294 sprintf(nm, "rd%d", rdev->raid_disk);
NeilBrown5e55e2f2007-03-26 21:32:14 -08005295 if (sysfs_create_link(&mddev->kobj,
5296 &rdev->kobj, nm))
5297 printk(KERN_WARNING
5298 "raid5: failed to create "
5299 " link %s for %s\n",
5300 nm, mdname(mddev));
NeilBrown29269552006-03-27 01:18:10 -08005301 } else
5302 break;
5303 }
5304
NeilBrownec32a2b2009-03-31 15:17:38 +11005305 if (mddev->delta_disks > 0) {
5306 spin_lock_irqsave(&conf->device_lock, flags);
5307 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
5308 - added_devices;
5309 spin_unlock_irqrestore(&conf->device_lock, flags);
5310 }
NeilBrown63c70c42006-03-27 01:18:13 -08005311 mddev->raid_disks = conf->raid_disks;
NeilBrownf6705572006-03-27 01:18:11 -08005312 mddev->reshape_position = 0;
NeilBrown850b2b42006-10-03 01:15:46 -07005313 set_bit(MD_CHANGE_DEVS, &mddev->flags);
NeilBrownf6705572006-03-27 01:18:11 -08005314
NeilBrown29269552006-03-27 01:18:10 -08005315 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5316 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5317 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
5318 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5319 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
5320 "%s_reshape");
5321 if (!mddev->sync_thread) {
5322 mddev->recovery = 0;
5323 spin_lock_irq(&conf->device_lock);
5324 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
NeilBrownfef9c612009-03-31 15:16:46 +11005325 conf->reshape_progress = MaxSector;
NeilBrown29269552006-03-27 01:18:10 -08005326 spin_unlock_irq(&conf->device_lock);
5327 return -EAGAIN;
5328 }
NeilBrownc8f517c2009-03-31 15:28:40 +11005329 conf->reshape_checkpoint = jiffies;
NeilBrown29269552006-03-27 01:18:10 -08005330 md_wakeup_thread(mddev->sync_thread);
5331 md_new_event(mddev);
5332 return 0;
5333}
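/*
 * Editor's worked example for the degraded accounting above (not
 * original text): growing a 4-disk array to 6 (delta_disks == 2)
 * while only one spare could be added gives
 * degraded = (6 - 4) - 1 = 1, so the reshape proceeds with one
 * missing member, to be recovered once a spare appears.
 */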
NeilBrown29269552006-03-27 01:18:10 -08005334
NeilBrownec32a2b2009-03-31 15:17:38 +11005335/* This is called from the reshape thread and should make any
5336 * changes needed in 'conf'
5337 */
NeilBrown29269552006-03-27 01:18:10 -08005338static void end_reshape(raid5_conf_t *conf)
5339{
NeilBrown29269552006-03-27 01:18:10 -08005340
NeilBrownf6705572006-03-27 01:18:11 -08005341 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
Dan Williams80c3a6c2009-03-17 18:10:40 -07005342
NeilBrownf6705572006-03-27 01:18:11 -08005343 spin_lock_irq(&conf->device_lock);
NeilBrowncea9c222009-03-31 15:15:05 +11005344 conf->previous_raid_disks = conf->raid_disks;
NeilBrownfef9c612009-03-31 15:16:46 +11005345 conf->reshape_progress = MaxSector;
NeilBrownf6705572006-03-27 01:18:11 -08005346 spin_unlock_irq(&conf->device_lock);
NeilBrownb0f9ec02009-03-31 15:27:18 +11005347 wake_up(&conf->wait_for_overlap);
NeilBrown16a53ec2006-06-26 00:27:38 -07005348
5349 /* read-ahead size must cover two whole stripes, which is
5350	 * 2 * (datadisks) * chunksize, where datadisks = raid_disks - max_degraded
5351 */
5352 {
NeilBrowncea9c222009-03-31 15:15:05 +11005353 int data_disks = conf->raid_disks - conf->max_degraded;
5354 int stripe = data_disks * (conf->chunk_size
5355 / PAGE_SIZE);
NeilBrown16a53ec2006-06-26 00:27:38 -07005356 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
5357 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
5358 }
NeilBrown29269552006-03-27 01:18:10 -08005359 }
NeilBrown29269552006-03-27 01:18:10 -08005360}
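/*
 * Editor's worked example for the read-ahead sizing above (not
 * original text): a 6-disk raid5 with 64k chunks and 4k pages has
 * data_disks = 6 - 1 = 5 and stripe = 5 * (64k / 4k) = 80 pages,
 * so ra_pages is raised to at least 160 pages (640k), covering two
 * whole stripes.
 */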
5361
NeilBrownec32a2b2009-03-31 15:17:38 +11005362/* This is called from the raid5d thread with mddev_lock held.
5363 * It makes config changes to the device.
5364 */
NeilBrowncea9c222009-03-31 15:15:05 +11005365static void raid5_finish_reshape(mddev_t *mddev)
5366{
5367 struct block_device *bdev;
NeilBrown88ce4932009-03-31 15:24:23 +11005368 raid5_conf_t *conf = mddev_to_conf(mddev);
NeilBrowncea9c222009-03-31 15:15:05 +11005369
5370 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5371
NeilBrownec32a2b2009-03-31 15:17:38 +11005372 if (mddev->delta_disks > 0) {
5373 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5374 set_capacity(mddev->gendisk, mddev->array_sectors);
5375 mddev->changed = 1;
NeilBrowncea9c222009-03-31 15:15:05 +11005376
NeilBrownec32a2b2009-03-31 15:17:38 +11005377 bdev = bdget_disk(mddev->gendisk, 0);
5378 if (bdev) {
5379 mutex_lock(&bdev->bd_inode->i_mutex);
5380 i_size_write(bdev->bd_inode,
5381 (loff_t)mddev->array_sectors << 9);
5382 mutex_unlock(&bdev->bd_inode->i_mutex);
5383 bdput(bdev);
5384 }
5385 } else {
5386 int d;
NeilBrownec32a2b2009-03-31 15:17:38 +11005387 mddev->degraded = conf->raid_disks;
5388 for (d = 0; d < conf->raid_disks ; d++)
5389 if (conf->disks[d].rdev &&
5390 test_bit(In_sync,
5391 &conf->disks[d].rdev->flags))
5392 mddev->degraded--;
5393 for (d = conf->raid_disks ;
5394 d < conf->raid_disks - mddev->delta_disks;
5395 d++)
5396 raid5_remove_disk(mddev, d);
NeilBrowncea9c222009-03-31 15:15:05 +11005397 }
NeilBrown88ce4932009-03-31 15:24:23 +11005398 mddev->layout = conf->algorithm;
5399 mddev->chunk_size = conf->chunk_size;
NeilBrownec32a2b2009-03-31 15:17:38 +11005400 mddev->reshape_position = MaxSector;
5401 mddev->delta_disks = 0;
NeilBrowncea9c222009-03-31 15:15:05 +11005402 }
5403}
5404
NeilBrown72626682005-09-09 16:23:54 -07005405static void raid5_quiesce(mddev_t *mddev, int state)
5406{
5407 raid5_conf_t *conf = mddev_to_conf(mddev);
5408
5409 switch(state) {
NeilBrowne464eaf2006-03-27 01:18:14 -08005410 case 2: /* resume for a suspend */
5411 wake_up(&conf->wait_for_overlap);
5412 break;
5413
NeilBrown72626682005-09-09 16:23:54 -07005414 case 1: /* stop all writes */
5415 spin_lock_irq(&conf->device_lock);
5416 conf->quiesce = 1;
5417 wait_event_lock_irq(conf->wait_for_stripe,
Raz Ben-Jehuda(caro)46031f92006-12-10 02:20:47 -08005418 atomic_read(&conf->active_stripes) == 0 &&
5419 atomic_read(&conf->active_aligned_reads) == 0,
NeilBrown72626682005-09-09 16:23:54 -07005420 conf->device_lock, /* nothing */);
5421 spin_unlock_irq(&conf->device_lock);
5422 break;
5423
5424 case 0: /* re-enable writes */
5425 spin_lock_irq(&conf->device_lock);
5426 conf->quiesce = 0;
5427 wake_up(&conf->wait_for_stripe);
NeilBrowne464eaf2006-03-27 01:18:14 -08005428 wake_up(&conf->wait_for_overlap);
NeilBrown72626682005-09-09 16:23:54 -07005429 spin_unlock_irq(&conf->device_lock);
5430 break;
5431 }
NeilBrown72626682005-09-09 16:23:54 -07005432}
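/*
 * Editor's sketch of how the quiesce states are meant to compose
 * (hypothetical caller, not from the original source):
 *
 *	raid5_quiesce(mddev, 1);	// stop writes; wait until active
 *					// and aligned-read counts drain
 *	... operate on a quiet array ...
 *	raid5_quiesce(mddev, 0);	// re-enable writes, wake waiters
 *
 * State 2 only wakes wait_for_overlap and is used when resuming from
 * a suspend, where the full stop/start cycle is unnecessary.
 */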
NeilBrownb15c2e52006-01-06 00:20:16 -08005433
NeilBrownd562b0c2009-03-31 14:39:39 +11005434
5435static void *raid5_takeover_raid1(mddev_t *mddev)
5436{
5437 int chunksect;
5438
5439 if (mddev->raid_disks != 2 ||
5440 mddev->degraded > 1)
5441 return ERR_PTR(-EINVAL);
5442
5443 /* Should check if there are write-behind devices? */
5444
5445 chunksect = 64*2; /* 64K by default */
5446
5447 /* The array must be an exact multiple of chunksize */
5448 while (chunksect && (mddev->array_sectors & (chunksect-1)))
5449 chunksect >>= 1;
5450
5451 if ((chunksect<<9) < STRIPE_SIZE)
5452 /* array size does not allow a suitable chunk size */
5453 return ERR_PTR(-EINVAL);
5454
5455 mddev->new_level = 5;
5456 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
5457 mddev->new_chunk = chunksect << 9;
5458
5459 return setup_conf(mddev);
5460}
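/*
 * Editor's worked example for the chunk selection above (numbers are
 * illustrative): the loop halves a 64k candidate (128 sectors) until
 * it divides array_sectors.  For array_sectors == 123456,
 * 123456 & 127 == 64 so 64k fails, but 123456 & 63 == 0, giving a
 * 32k chunk; a candidate that bottoms out below STRIPE_SIZE is
 * rejected with -EINVAL instead.
 */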
5461
NeilBrownfc9739c2009-03-31 14:57:20 +11005462static void *raid5_takeover_raid6(mddev_t *mddev)
5463{
5464 int new_layout;
5465
5466 switch (mddev->layout) {
5467 case ALGORITHM_LEFT_ASYMMETRIC_6:
5468 new_layout = ALGORITHM_LEFT_ASYMMETRIC;
5469 break;
5470 case ALGORITHM_RIGHT_ASYMMETRIC_6:
5471 new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
5472 break;
5473 case ALGORITHM_LEFT_SYMMETRIC_6:
5474 new_layout = ALGORITHM_LEFT_SYMMETRIC;
5475 break;
5476 case ALGORITHM_RIGHT_SYMMETRIC_6:
5477 new_layout = ALGORITHM_RIGHT_SYMMETRIC;
5478 break;
5479 case ALGORITHM_PARITY_0_6:
5480 new_layout = ALGORITHM_PARITY_0;
5481 break;
5482 case ALGORITHM_PARITY_N:
5483 new_layout = ALGORITHM_PARITY_N;
5484 break;
5485 default:
5486 return ERR_PTR(-EINVAL);
5487 }
5488 mddev->new_level = 5;
5489 mddev->new_layout = new_layout;
5490 mddev->delta_disks = -1;
5491 mddev->raid_disks -= 1;
5492 return setup_conf(mddev);
5493}
5494
NeilBrownd562b0c2009-03-31 14:39:39 +11005495
NeilBrownb3546032009-03-31 14:56:41 +11005496static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5497{
NeilBrown88ce4932009-03-31 15:24:23 +11005498 /* For a 2-drive array, the layout and chunk size can be changed
5499	 * immediately, as no restriping is needed.
5500	 * For larger arrays we record the new value, after validation,
5501	 * to be used by a later reshape pass.
NeilBrownb3546032009-03-31 14:56:41 +11005502 */
5503 raid5_conf_t *conf = mddev_to_conf(mddev);
5504
5505 if (new_layout >= 0 && !algorithm_valid_raid5(new_layout))
5506 return -EINVAL;
5507 if (new_chunk > 0) {
5508 if (new_chunk & (new_chunk-1))
5509 /* not a power of 2 */
5510 return -EINVAL;
5511 if (new_chunk < PAGE_SIZE)
5512 return -EINVAL;
5513 if (mddev->array_sectors & ((new_chunk>>9)-1))
5514 /* not factor of array size */
5515 return -EINVAL;
5516 }
5517
5518 /* They look valid */
5519
NeilBrown88ce4932009-03-31 15:24:23 +11005520 if (mddev->raid_disks == 2) {
NeilBrownb3546032009-03-31 14:56:41 +11005521
NeilBrown88ce4932009-03-31 15:24:23 +11005522 if (new_layout >= 0) {
5523 conf->algorithm = new_layout;
5524 mddev->layout = mddev->new_layout = new_layout;
5525 }
5526 if (new_chunk > 0) {
5527 conf->chunk_size = new_chunk;
5528 mddev->chunk_size = mddev->new_chunk = new_chunk;
5529 }
5530 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5531 md_wakeup_thread(mddev->thread);
5532 } else {
5533 if (new_layout >= 0)
5534 mddev->new_layout = new_layout;
5535 if (new_chunk > 0)
5536 mddev->new_chunk = new_chunk;
NeilBrownb3546032009-03-31 14:56:41 +11005537 }
NeilBrown88ce4932009-03-31 15:24:23 +11005538 return 0;
5539}
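/*
 * Editor's note on the validation above (illustrative): a new chunk
 * must be a power of two, at least PAGE_SIZE, and a factor of the
 * array size.  E.g. new_chunk == 65536 passes the first two tests on
 * a 4k-page system and then requires array_sectors to be a multiple
 * of 65536 >> 9 == 128 sectors.
 */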
5540
5541static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
5542{
5543 if (new_layout >= 0 && !algorithm_valid_raid6(new_layout))
5544 return -EINVAL;
NeilBrownb3546032009-03-31 14:56:41 +11005545 if (new_chunk > 0) {
NeilBrown88ce4932009-03-31 15:24:23 +11005546 if (new_chunk & (new_chunk-1))
5547 /* not a power of 2 */
5548 return -EINVAL;
5549 if (new_chunk < PAGE_SIZE)
5550 return -EINVAL;
5551 if (mddev->array_sectors & ((new_chunk>>9)-1))
5552 /* not factor of array size */
5553 return -EINVAL;
NeilBrownb3546032009-03-31 14:56:41 +11005554 }
NeilBrown88ce4932009-03-31 15:24:23 +11005555
5556 /* They look valid */
5557
5558 if (new_layout >= 0)
5559 mddev->new_layout = new_layout;
5560 if (new_chunk > 0)
5561 mddev->new_chunk = new_chunk;
5562
NeilBrownb3546032009-03-31 14:56:41 +11005563 return 0;
5564}
5565
NeilBrownd562b0c2009-03-31 14:39:39 +11005566static void *raid5_takeover(mddev_t *mddev)
5567{
5568 /* raid5 can take over:
5569 * raid0 - if all devices are the same - make it a raid4 layout
5570 * raid1 - if there are two drives. We need to know the chunk size
5571 * raid4 - trivial - just use a raid4 layout.
5572 * raid6 - Providing it is a *_6 layout
5573 *
5574	 * raid0 takeover is not yet implemented.
5575 */
5576
5577 if (mddev->level == 1)
5578 return raid5_takeover_raid1(mddev);
NeilBrowne9d47582009-03-31 14:57:09 +11005579 if (mddev->level == 4) {
5580 mddev->new_layout = ALGORITHM_PARITY_N;
5581 mddev->new_level = 5;
5582 return setup_conf(mddev);
5583 }
NeilBrownfc9739c2009-03-31 14:57:20 +11005584 if (mddev->level == 6)
5585 return raid5_takeover_raid6(mddev);
NeilBrownd562b0c2009-03-31 14:39:39 +11005586
5587 return ERR_PTR(-EINVAL);
5588}
5589
5590
NeilBrown245f46c2009-03-31 14:39:39 +11005591static struct mdk_personality raid5_personality;
5592
5593static void *raid6_takeover(mddev_t *mddev)
5594{
5595 /* Currently can only take over a raid5. We map the
5596 * personality to an equivalent raid6 personality
5597 * with the Q block at the end.
5598 */
5599 int new_layout;
5600
5601 if (mddev->pers != &raid5_personality)
5602 return ERR_PTR(-EINVAL);
5603 if (mddev->degraded > 1)
5604 return ERR_PTR(-EINVAL);
5605 if (mddev->raid_disks > 253)
5606 return ERR_PTR(-EINVAL);
5607 if (mddev->raid_disks < 3)
5608 return ERR_PTR(-EINVAL);
5609
5610 switch (mddev->layout) {
5611 case ALGORITHM_LEFT_ASYMMETRIC:
5612 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
5613 break;
5614 case ALGORITHM_RIGHT_ASYMMETRIC:
5615 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
5616 break;
5617 case ALGORITHM_LEFT_SYMMETRIC:
5618 new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
5619 break;
5620 case ALGORITHM_RIGHT_SYMMETRIC:
5621 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
5622 break;
5623 case ALGORITHM_PARITY_0:
5624 new_layout = ALGORITHM_PARITY_0_6;
5625 break;
5626 case ALGORITHM_PARITY_N:
5627 new_layout = ALGORITHM_PARITY_N;
5628 break;
5629 default:
5630 return ERR_PTR(-EINVAL);
5631 }
5632 mddev->new_level = 6;
5633 mddev->new_layout = new_layout;
5634 mddev->delta_disks = 1;
5635 mddev->raid_disks += 1;
5636 return setup_conf(mddev);
5637}
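/*
 * Editor's worked example (not original text): taking over a 4-disk
 * LEFT_SYMMETRIC raid5 yields a 5-disk raid6 with layout
 * LEFT_SYMMETRIC_6: the raid5 rotation is preserved and the Q block
 * lives on the new last device, hence delta_disks == 1.
 */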
5638
5639
NeilBrown16a53ec2006-06-26 00:27:38 -07005640static struct mdk_personality raid6_personality =
5641{
5642 .name = "raid6",
5643 .level = 6,
5644 .owner = THIS_MODULE,
5645 .make_request = make_request,
5646 .run = run,
5647 .stop = stop,
5648 .status = status,
5649 .error_handler = error,
5650 .hot_add_disk = raid5_add_disk,
5651 .hot_remove_disk= raid5_remove_disk,
5652 .spare_active = raid5_spare_active,
5653 .sync_request = sync_request,
5654 .resize = raid5_resize,
Dan Williams80c3a6c2009-03-17 18:10:40 -07005655 .size = raid5_size,
NeilBrownf4168852007-02-28 20:11:53 -08005656 .check_reshape = raid5_check_reshape,
5657 .start_reshape = raid5_start_reshape,
NeilBrowncea9c222009-03-31 15:15:05 +11005658 .finish_reshape = raid5_finish_reshape,
NeilBrown16a53ec2006-06-26 00:27:38 -07005659 .quiesce = raid5_quiesce,
NeilBrown245f46c2009-03-31 14:39:39 +11005660 .takeover = raid6_takeover,
NeilBrown88ce4932009-03-31 15:24:23 +11005661 .reconfig = raid6_reconfig,
NeilBrown16a53ec2006-06-26 00:27:38 -07005662};
NeilBrown2604b702006-01-06 00:20:36 -08005663static struct mdk_personality raid5_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07005664{
5665 .name = "raid5",
NeilBrown2604b702006-01-06 00:20:36 -08005666 .level = 5,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005667 .owner = THIS_MODULE,
5668 .make_request = make_request,
5669 .run = run,
5670 .stop = stop,
5671 .status = status,
5672 .error_handler = error,
5673 .hot_add_disk = raid5_add_disk,
5674 .hot_remove_disk= raid5_remove_disk,
5675 .spare_active = raid5_spare_active,
5676 .sync_request = sync_request,
5677 .resize = raid5_resize,
Dan Williams80c3a6c2009-03-17 18:10:40 -07005678 .size = raid5_size,
NeilBrown63c70c42006-03-27 01:18:13 -08005679 .check_reshape = raid5_check_reshape,
5680 .start_reshape = raid5_start_reshape,
NeilBrowncea9c222009-03-31 15:15:05 +11005681 .finish_reshape = raid5_finish_reshape,
NeilBrown72626682005-09-09 16:23:54 -07005682 .quiesce = raid5_quiesce,
NeilBrownd562b0c2009-03-31 14:39:39 +11005683 .takeover = raid5_takeover,
NeilBrownb3546032009-03-31 14:56:41 +11005684 .reconfig = raid5_reconfig,
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685};
5686
NeilBrown2604b702006-01-06 00:20:36 -08005687static struct mdk_personality raid4_personality =
Linus Torvalds1da177e2005-04-16 15:20:36 -07005688{
NeilBrown2604b702006-01-06 00:20:36 -08005689 .name = "raid4",
5690 .level = 4,
5691 .owner = THIS_MODULE,
5692 .make_request = make_request,
5693 .run = run,
5694 .stop = stop,
5695 .status = status,
5696 .error_handler = error,
5697 .hot_add_disk = raid5_add_disk,
5698 .hot_remove_disk= raid5_remove_disk,
5699 .spare_active = raid5_spare_active,
5700 .sync_request = sync_request,
5701 .resize = raid5_resize,
Dan Williams80c3a6c2009-03-17 18:10:40 -07005702 .size = raid5_size,
NeilBrown3d378902007-03-26 21:32:13 -08005703 .check_reshape = raid5_check_reshape,
5704 .start_reshape = raid5_start_reshape,
NeilBrowncea9c222009-03-31 15:15:05 +11005705 .finish_reshape = raid5_finish_reshape,
NeilBrown2604b702006-01-06 00:20:36 -08005706 .quiesce = raid5_quiesce,
5707};
5708
5709static int __init raid5_init(void)
5710{
NeilBrown16a53ec2006-06-26 00:27:38 -07005711 register_md_personality(&raid6_personality);
NeilBrown2604b702006-01-06 00:20:36 -08005712 register_md_personality(&raid5_personality);
5713 register_md_personality(&raid4_personality);
5714 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005715}
5716
NeilBrown2604b702006-01-06 00:20:36 -08005717static void raid5_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005718{
NeilBrown16a53ec2006-06-26 00:27:38 -07005719 unregister_md_personality(&raid6_personality);
NeilBrown2604b702006-01-06 00:20:36 -08005720 unregister_md_personality(&raid5_personality);
5721 unregister_md_personality(&raid4_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005722}
5723
5724module_init(raid5_init);
5725module_exit(raid5_exit);
5726MODULE_LICENSE("GPL");
5727MODULE_ALIAS("md-personality-4"); /* RAID5 */
NeilBrownd9d166c2006-01-06 00:20:51 -08005728MODULE_ALIAS("md-raid5");
5729MODULE_ALIAS("md-raid4");
NeilBrown2604b702006-01-06 00:20:36 -08005730MODULE_ALIAS("md-level-5");
5731MODULE_ALIAS("md-level-4");
NeilBrown16a53ec2006-06-26 00:27:38 -07005732MODULE_ALIAS("md-personality-8"); /* RAID6 */
5733MODULE_ALIAS("md-raid6");
5734MODULE_ALIAS("md-level-6");
5735
5736/* This used to be two separate modules, they were: */
5737MODULE_ALIAS("raid5");
5738MODULE_ALIAS("raid6");