blob: 2bef4e8789c870d646ce280fd711f34c0eb16f46 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _RAID10_H
2#define _RAID10_H
3
Jonathan Brassowdc280d982012-07-31 10:03:52 +10004struct raid10_info {
NeilBrown69335ef2011-12-23 10:17:54 +11005 struct md_rdev *rdev, *replacement;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 sector_t head_position;
NeilBrown2bb77732011-07-27 11:00:36 +10007 int recovery_disabled; /* matches
8 * mddev->recovery_disabled
9 * when we shouldn't try
10 * recovering this device.
11 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070012};
13
NeilBrowne879a872011-10-11 16:49:02 +110014struct r10conf {
NeilBrownfd01b882011-10-11 16:47:53 +110015 struct mddev *mddev;
Jonathan Brassowdc280d982012-07-31 10:03:52 +100016 struct raid10_info *mirrors;
17 struct raid10_info *mirrors_new, *mirrors_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 spinlock_t device_lock;
19
20 /* geometry */
NeilBrown5cf00fc2012-05-21 09:28:20 +100021 struct geom {
22 int raid_disks;
23 int near_copies; /* number of copies laid out
NeilBrown69335ef2011-12-23 10:17:54 +110024 * raid0 style */
NeilBrown5cf00fc2012-05-21 09:28:20 +100025 int far_copies; /* number of copies laid out
Linus Torvalds1da177e2005-04-16 15:20:36 -070026 * at large strides across drives
27 */
NeilBrown5cf00fc2012-05-21 09:28:20 +100028 int far_offset; /* far_copies are offset by 1
NeilBrown69335ef2011-12-23 10:17:54 +110029 * stripe instead of many
NeilBrownc93983b2006-06-26 00:27:41 -070030 */
NeilBrown5cf00fc2012-05-21 09:28:20 +100031 sector_t stride; /* distance between far copies.
NeilBrownc93983b2006-06-26 00:27:41 -070032 * This is size / far_copies unless
33 * far_offset, in which case it is
34 * 1 stripe.
Linus Torvalds1da177e2005-04-16 15:20:36 -070035 */
Jonathan Brassow475901a2013-02-21 13:28:10 +110036 int far_set_size; /* The number of devices in a set,
37 * where a 'set' are devices that
38 * contain far/offset copies of
39 * each other.
40 */
NeilBrown5cf00fc2012-05-21 09:28:20 +100041 int chunk_shift; /* shift from chunks to sectors */
42 sector_t chunk_mask;
NeilBrownf8c9e742012-05-21 09:28:33 +100043 } prev, geo;
NeilBrown5cf00fc2012-05-21 09:28:20 +100044 int copies; /* near_copies * far_copies.
45 * must be <= raid_disks
46 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
NeilBrown69335ef2011-12-23 10:17:54 +110048 sector_t dev_sectors; /* temp copy of
49 * mddev->dev_sectors */
NeilBrownf8c9e742012-05-21 09:28:33 +100050 sector_t reshape_progress;
NeilBrown3ea7daa2012-05-22 13:53:47 +100051 sector_t reshape_safe;
52 unsigned long reshape_checkpoint;
53 sector_t offset_diff;
Trela, Maciejdab8b292010-03-08 16:02:45 +110054
Linus Torvalds1da177e2005-04-16 15:20:36 -070055 struct list_head retry_list;
NeilBrown95af5872015-08-14 11:26:17 +100056 /* A separate list of r1bio which just need raid_end_bio_io called.
57 * This mustn't happen for writes which had any errors if the superblock
58 * needs to be written.
59 */
60 struct list_head bio_end_io_list;
61
NeilBrown6cce3b22006-01-06 00:20:16 -080062 /* queue pending writes and submit them on unplug */
63 struct bio_list pending_bio_list;
NeilBrown34db0cd2011-10-11 16:50:01 +110064 int pending_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
66 spinlock_t resync_lock;
Tomasz Majchrzak0e5313e2016-06-24 14:20:16 +020067 atomic_t nr_pending;
NeilBrown69335ef2011-12-23 10:17:54 +110068 int nr_waiting;
69 int nr_queued;
70 int barrier;
Tomasz Majchrzak0e5313e2016-06-24 14:20:16 +020071 int array_freeze_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -070072 sector_t next_resync;
NeilBrown6cce3b22006-01-06 00:20:16 -080073 int fullsync; /* set to 1 if a full sync is needed,
74 * (fresh device added).
75 * Cleared when a sync completes.
76 */
NeilBrown69335ef2011-12-23 10:17:54 +110077 int have_replacement; /* There is at least one
78 * replacement device.
79 */
NeilBrown0a27ec92006-01-06 00:20:13 -080080 wait_queue_head_t wait_barrier;
Linus Torvalds1da177e2005-04-16 15:20:36 -070081
NeilBrown69335ef2011-12-23 10:17:54 +110082 mempool_t *r10bio_pool;
83 mempool_t *r10buf_pool;
NeilBrown4443ae12006-01-06 00:20:28 -080084 struct page *tmppage;
NeilBrownfc9977d2017-04-05 14:05:51 +100085 struct bio_set *bio_split;
Trela, Maciejdab8b292010-03-08 16:02:45 +110086
87 /* When taking over an array from a different personality, we store
88 * the new thread here until we fully activate the array.
89 */
NeilBrown2b8bf342011-10-11 16:48:23 +110090 struct md_thread *thread;
Guoqing Jiang8db87912017-10-24 15:11:52 +080091
92 /*
93 * Keep track of cluster resync window to send to other nodes.
94 */
95 sector_t cluster_sync_low;
96 sector_t cluster_sync_high;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097};
98
Linus Torvalds1da177e2005-04-16 15:20:36 -070099/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100 * this is our 'private' RAID10 bio.
101 *
102 * it contains information about what kind of IO operations were started
103 * for this RAID10 operation, and about their status:
104 */
105
NeilBrown9f2c9d12011-10-11 16:48:43 +1100106struct r10bio {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107 atomic_t remaining; /* 'have we finished' count,
108 * used from IRQ handlers
109 */
110 sector_t sector; /* virtual sector number */
111 int sectors;
112 unsigned long state;
NeilBrownfd01b882011-10-11 16:47:53 +1100113 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 /*
115 * original bio going to /dev/mdx
116 */
117 struct bio *master_bio;
118 /*
119 * if the IO is in READ direction, then this is where we read
120 */
121 int read_slot;
122
123 struct list_head retry_list;
124 /*
125 * if the IO is in WRITE direction, then multiple bios are used,
126 * one for each copy.
127 * When resyncing we also use one for each copy.
128 * When reconstructing, we use 2 bios, one for read, one for write.
129 * We choose the number when they are allocated.
NeilBrown69335ef2011-12-23 10:17:54 +1100130 * We sometimes need an extra bio to write to the replacement.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131 */
NeilBrowne0ee7782012-08-18 09:51:42 +1000132 struct r10dev {
NeilBrown69335ef2011-12-23 10:17:54 +1100133 struct bio *bio;
134 union {
135 struct bio *repl_bio; /* used for resync and
136 * writes */
137 struct md_rdev *rdev; /* used for reads
138 * (read_slot >= 0) */
139 };
140 sector_t addr;
141 int devnum;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142 } devs[0];
143};
144
145/* bits for r10bio.state */
/* bits for r10bio.state — each enumerator is a bit index into the
 * 'state' word, so the order here is part of the on-flight protocol
 * and must not change. */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
	R10BIO_ReadError,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
/* During a reshape we might be performing IO on the
 * 'previous' part of the array, in which case this
 * flag is set
 */
	R10BIO_Previous,
/* failfast devices did receive failfast requests. */
	R10BIO_FailFast,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169#endif