/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RAID10_H
#define _RAID10_H

/* Note: raid10_info.rdev can be set to NULL asynchronously by
 * raid10_remove_disk.
 * There are three safe ways to access raid10_info.rdev.
 * 1/ when holding mddev->reconfig_mutex
 * 2/ when resync/recovery/reshape is known to be happening - i.e. in code
 *    that is called as part of performing resync/recovery/reshape.
 * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer
 *    and if it is non-NULL, increment rdev->nr_pending before dropping the
 *    RCU lock.
 * When .rdev is set to NULL, the nr_pending count is checked again and if it
 * has been incremented, the pointer is put back in .rdev.
 */

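/* Illustrative sketch of access rule 3/ above, roughly as the raid10
 * driver applies it; "conf" and "slot" are placeholder names for whatever
 * context the caller already holds:
 *
 *	struct md_rdev *rdev;
 *
 *	rcu_read_lock();
 *	rdev = rcu_dereference(conf->mirrors[slot].rdev);
 *	if (rdev && !test_bit(Faulty, &rdev->flags))
 *		atomic_inc(&rdev->nr_pending);
 *	rcu_read_unlock();
 *
 *	... if the reference was taken, submit the I/O and finally ...
 *
 *	rdev_dec_pending(rdev, conf->mddev);
 */
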
struct raid10_info {
	struct md_rdev	*rdev, *replacement;
	sector_t	head_position;
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};

struct r10conf {
	struct mddev		*mddev;
	struct raid10_info	*mirrors;
	struct raid10_info	*mirrors_new, *mirrors_old;
	spinlock_t		device_lock;

	/* geometry */
	struct geom {
		int		raid_disks;
		int		near_copies;  /* number of copies laid out
					       * raid0 style */
		int		far_copies;   /* number of copies laid out
					       * at large strides across drives
					       */
		int		far_offset;   /* far_copies are offset by 1
					       * stripe instead of many
					       */
		sector_t	stride;	      /* distance between far copies.
					       * This is size / far_copies unless
					       * far_offset, in which case it is
					       * 1 stripe.
					       */
		int		far_set_size; /* The number of devices in a set,
					       * where a 'set' is a group of
					       * devices that contain far/offset
					       * copies of each other.
					       */
		int		chunk_shift;  /* shift from chunks to sectors */
		sector_t	chunk_mask;
	} prev, geo;
	int			copies;	      /* near_copies * far_copies.
					       * must be <= raid_disks
					       */
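	/* Illustrative example of the geometry above (not part of the
	 * driver state): with 4 devices and a 512KiB chunk, chunk_shift is
	 * 10 and chunk_mask is 1023 (both in 512-byte sectors).  A near=2
	 * layout puts the two copies of each chunk on adjacent devices:
	 *
	 *	dev0  dev1  dev2  dev3
	 *	 A     A     B     B
	 *	 C     C     D     D
	 *
	 * A far=2 layout stripes one copy raid0-style across the start of
	 * every device and a second copy, rotated by one device, "stride"
	 * sectors further in:
	 *
	 *	dev0  dev1  dev2  dev3
	 *	 A     B     C     D
	 *	 E     F     G     H
	 *	 ...
	 *	 D     A     B     C
	 *	 H     E     F     G
	 *
	 * Both examples give copies == near_copies * far_copies == 2.
	 */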

	sector_t		dev_sectors;  /* temp copy of
					       * mddev->dev_sectors */
	sector_t		reshape_progress;
	sector_t		reshape_safe;
	unsigned long		reshape_checkpoint;
	sector_t		offset_diff;

	struct list_head	retry_list;
	/* A separate list of r10bio which just need raid_end_bio_io called.
	 * This mustn't happen for writes which had any errors if the superblock
	 * needs to be written.
	 */
	struct list_head	bio_end_io_list;

	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	spinlock_t		resync_lock;
	atomic_t		nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	int			array_freeze_pending;
	sector_t		next_resync;
	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			have_replacement; /* There is at least one
						   * replacement device.
						   */
	wait_queue_head_t	wait_barrier;

	mempool_t		r10bio_pool;
	mempool_t		r10buf_pool;
	struct page		*tmppage;
	struct bio_set		bio_split;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;

	/*
	 * Keep track of cluster resync window to send to other nodes.
	 */
	sector_t		cluster_sync_low;
	sector_t		cluster_sync_high;
};

/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t		remaining;	/* 'have we finished' count,
						 * used from IRQ handlers
						 */
	sector_t		sector;		/* virtual sector number */
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_slot;

	struct list_head	retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated (see the sizing
	 * sketch after this structure).
	 * We sometimes need an extra bio to write to the replacement.
	 */
	struct r10dev {
		struct bio	*bio;
		union {
			struct bio	*repl_bio; /* used for resync and
						    * writes */
			struct md_rdev	*rdev;	   /* used for reads
						    * (read_slot >= 0) */
		};
		sector_t	addr;
		int		devnum;
	} devs[];
};
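
/* Sizing sketch (illustrative only): because devs[] is a flexible array
 * member, an r10bio is allocated with one r10dev entry per copy, roughly:
 *
 *	int size = offsetof(struct r10bio, devs[conf->copies]);
 *	struct r10bio *r10_bio = kzalloc(size, GFP_NOIO);
 *
 * The variable names and GFP flags are placeholders; the driver normally
 * obtains these objects through the r10bio_pool mempool above.
 */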

/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
	R10BIO_ReadError,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
/* During a reshape we might be performing IO on the
 * 'previous' part of the array, in which case this
 * flag is set
 */
	R10BIO_Previous,
/* The devices are marked failfast, so the requests were issued with the
 * failfast flag set.
 */
	R10BIO_FailFast,
};
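
/* Usage sketch (illustrative only): the state word is manipulated with the
 * generic atomic bit helpers, e.g.
 *
 *	set_bit(R10BIO_Uptodate, &r10_bio->state);
 *	if (test_bit(R10BIO_FailFast, &r10_bio->state))
 *		... retry without the failfast flag ...
 *
 * where "r10_bio" is assumed to be a struct r10bio pointer held by the
 * caller.
 */
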
#endif