// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

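/* Tear down the serialization state created by rdev_init_serial(): free
 * the bucket array and clear CollisionCheck.  No-op if the flag is unset.
 */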
static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

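/* Allocate the per-rdev array of serialization buckets (one per barrier
 * bucket) used to check write-behind I/O for overlap, and mark the rdev
 * with CollisionCheck.
 */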
static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

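/* Initialize serialization state for every rdev in the array, stopping
 * at the first failure.
 */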
static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * rdev needs to enable serialization if it meets both conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resources for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			      bool is_suspend)
{
	int ret = 0;

	if (rdev && !rdev_need_serial(rdev) &&
	    !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (!is_suspend)
		mddev_suspend(mddev);

	if (!rdev)
		ret = rdevs_init_serial(mddev);
	else
		ret = rdev_init_serial(rdev);
	if (ret)
		goto abort;

	if (mddev->serial_info_pool == NULL) {
		/*
		 * already in memalloc noio context by
		 * mddev_suspend()
		 */
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		if (!mddev->serial_info_pool) {
			rdevs_uninit_serial(mddev);
			pr_err("can't alloc memory pool for serialization\n");
		}
	}

abort:
	if (!is_suspend)
		mddev_resume(mddev);
}

/*
 * Free resources from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. when the bitmap is destroyed while the policy is not enabled.
 * 3. for the disable policy, the pool is destroyed only when no rdev needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			       bool is_suspend)
{
	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0; /* used to track if other rdevs need the pool */

		if (!is_suspend)
			mddev_suspend(mddev);
		rdev_for_each(temp, mddev) {
			if (!rdev) {
				if (!mddev->serialize_policy ||
				    !rdev_need_serial(temp))
					rdev_uninit_serial(temp);
				else
					num++;
			} else if (temp != rdev &&
				   test_bit(CollisionCheck, &temp->flags))
				num++;
		}

		if (rdev)
			rdev_uninit_serial(rdev);

		if (num)
			pr_info("The mempool could be used by other devices\n");
		else {
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
		}
		if (!is_suspend)
			mddev_resume(mddev);
	}
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it.  This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

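/* Allocate from the mddev's own bio_set when it is initialized, so bio
 * allocation can make forward progress under memory pressure; otherwise
 * fall back to the global bio pool.
 */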
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->bio_set))
		return bio_alloc(gfp_mask, nr_iovecs);

	return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->sync_set))
		return bio_alloc(GFP_NOIO, 1);

	return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
}

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
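/* A bio must wait if the whole array is suspended, or if it is a write
 * that overlaps the [suspend_lo, suspend_hi) window.
 */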
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

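/* Per-bio context used for I/O accounting: md_submit_bio() saves the
 * original completion here so md_end_io() can finish the accounting and
 * then restore and call the original ->bi_end_io.
 */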
struct md_io {
	struct mddev *mddev;
	bio_end_io_t *orig_bi_end_io;
	void *orig_bi_private;
	unsigned long start_time;
	struct hd_struct *part;
};

static void md_end_io(struct bio *bio)
{
	struct md_io *md_io = bio->bi_private;
	struct mddev *mddev = md_io->mddev;

	part_end_io_acct(md_io->part, bio, md_io->start_time);

	bio->bi_end_io = md_io->orig_bi_end_io;
	bio->bi_private = md_io->orig_bi_private;

	mempool_free(md_io, &mddev->md_io_pool);

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}

static blk_qc_t md_submit_bio(struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = bio->bi_disk->private_data;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(&bio);

	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (bio->bi_end_io != md_end_io) {
		struct md_io *md_io;

		md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
		md_io->mddev = mddev;
		md_io->orig_bi_end_io = bio->bi_end_io;
		md_io->orig_bi_private = bio->bi_private;

		bio->bi_end_io = md_end_io;
		bio->bi_private = md_io;

		md_io->start_time = part_start_io_acct(mddev->gendisk,
						       &md_io->part, bio);
	}

	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	/* leave the memalloc scope entered from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

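/* Send an empty REQ_PREFLUSH bio to every active, non-faulty rdev.  Each
 * completion drops flush_pending; the final drop schedules
 * md_submit_flush_data() to handle the data portion of the original bio.
 */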
static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to
	 * avoid a deadlock: other bios that already passed the
	 * md_handle_request suspend check could wait on this flush_bio,
	 * while the md_handle_request call below could in turn wait on
	 * those bios because of the suspend check
	 */
	mddev->last_flush = mddev->start_flush;
	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}

/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
 * being finished in another context.  Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_after(mddev->last_flush, start),
			    mddev->lock);
	if (!ktime_after(mddev->last_flush, start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
	}
	spin_unlock_irq(&mddev->lock);

	if (!bio) {
		INIT_WORK(&mddev->flush_work, submit_flushes);
		queue_work(md_wq, &mddev->flush_work);
	} else {
		/* flush was performed for some other bio while we waited. */
		if (bio->bi_iter.bi_size == 0)
			/* an empty barrier - all done */
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(md_flush_request);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

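/* Drop a reference.  When the last reference to an unconfigured array
 * that is not held active goes away, schedule the mddev for deletion on
 * the workqueue.
 */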
static void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	kobject_init(&mddev->kobj, &md_ktype);
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

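/* Find the mddev for a unit number, allocating and registering a new one
 * if none exists.  A unit of 0 asks for an unused minor to be picked for
 * a new array, with the search starting at minor 512.
 */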
static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So keep sysfs_active set while the removal is happening,
		 * and anything else which might set ->to_remove or might
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

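/* Look up an rdev by descriptor number.  The caller must hold the RCU
 * read lock.
 */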
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: %s gets error=%d\n", __func__,
		       blk_status_to_errno(bio->bi_status));
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = md_bio_alloc_sync(mddev);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
	int ret;

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(bio, rdev->meta_bdev);
	else
		bio_set_dev(bio, rdev->bdev);
	bio_set_op_attrs(bio, op, op_flags);
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(bio);

	ret = !bio->bi_status;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

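/* Read the superblock into the pre-allocated rdev->sb_page, doing the
 * disk I/O at most once per device.
 */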
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

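/* Compare the generic, constant section of two 0.90 superblocks while
 * ignoring nr_disks, which legitimately differs between devices.
 */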
static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

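/* Checksum the 0.90 superblock: sum all 32-bit words with sb_csum
 * temporarily zeroed, then fold the 64-bit total into 32 bits.
 */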
static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */

1175struct super_type {
Chris Webb0cd17fe2008-06-28 08:31:46 +10001176 char *name;
1177 struct module *owner;
NeilBrownc6563a82012-05-21 09:27:00 +10001178 int (*load_super)(struct md_rdev *rdev,
1179 struct md_rdev *refdev,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001180 int minor_version);
NeilBrownc6563a82012-05-21 09:27:00 +10001181 int (*validate_super)(struct mddev *mddev,
1182 struct md_rdev *rdev);
1183 void (*sync_super)(struct mddev *mddev,
1184 struct md_rdev *rdev);
NeilBrown3cb03002011-10-11 16:45:26 +11001185 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
Andre Noll15f4a5f2008-07-21 14:42:12 +10001186 sector_t num_sectors);
NeilBrownc6563a82012-05-21 09:27:00 +10001187 int (*allow_new_offset)(struct md_rdev *rdev,
1188 unsigned long long new_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189};
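
/*
 * Dispatch sketch (illustrative only; the real call sites, such as
 * md_import_device(), live outside this part of the file):
 *
 *	int ver = 0;	// index into super_types[], defined below
 *	err = super_types[ver].load_super(rdev, refdev, minor_version);
 *	if (err >= 0)
 *		err = super_types[ver].validate_super(mddev, rdev);
 *
 * sync_super() further down shows the same table dispatch, guarded by
 * a BUG_ON() against out-of-range major_version values.
 */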
1190
1191/*
Andre Noll0894cc32009-06-18 08:49:23 +10001192 * Check that the given mddev has no bitmap.
1193 *
1194 * This function is called from the run method of all personalities that do not
1195 * support bitmaps. It prints an error message and returns non-zero if mddev
1196 * has a bitmap. Otherwise, it returns 0.
1197 *
1198 */
NeilBrownfd01b882011-10-11 16:47:53 +11001199int md_check_no_bitmap(struct mddev *mddev)
Andre Noll0894cc32009-06-18 08:49:23 +10001200{
NeilBrownc3d97142009-12-14 12:49:52 +11001201 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
Andre Noll0894cc32009-06-18 08:49:23 +10001202 return 0;
NeilBrown9d487392016-11-02 14:16:49 +11001203 pr_warn("%s: bitmaps are not supported for %s\n",
Andre Noll0894cc32009-06-18 08:49:23 +10001204 mdname(mddev), mddev->pers->name);
1205 return 1;
1206}
1207EXPORT_SYMBOL(md_check_no_bitmap);
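
/*
 * Usage sketch for a personality without bitmap support (abridged and
 * illustrative; raid0's run method follows this pattern):
 *
 *	static int raid0_run(struct mddev *mddev)
 *	{
 *		if (md_check_no_bitmap(mddev))
 *			return -EINVAL;
 *		...
 *	}
 */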
1208
1209/*
NeilBrownf72ffdd2014-09-30 14:23:59 +10001210 * load_super for 0.90.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 */
NeilBrown3cb03002011-10-11 16:45:26 +11001212static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213{
1214 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1215 mdp_super_t *sb;
1216 int ret;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001217 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
1219 /*
Andre Noll0f420352008-07-11 22:02:23 +10001220	 * Calculate the position of the superblock (in 512-byte sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221	 * it's at the end of the disk.
 1222	 *
 1223	 * It also happens to be a multiple of 4KB.
1224 */
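	/*
	 * For example, assuming calc_dev_sboffset() rounds the device
	 * size down to a 64K (128-sector) boundary and then steps back
	 * one such chunk: a device of 8000100 sectors would put the
	 * superblock at sector 8000000 - 128 = 7999872.
	 */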
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001225 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
NeilBrown0002b272005-09-09 16:23:53 -07001227 ret = read_disk_sb(rdev, MD_SB_BYTES);
NeilBrown9d487392016-11-02 14:16:49 +11001228 if (ret)
1229 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230
1231 ret = -EINVAL;
1232
1233 bdevname(rdev->bdev, b);
Namhyung Kim65a06f062011-07-27 11:00:36 +10001234 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235
1236 if (sb->md_magic != MD_SB_MAGIC) {
NeilBrown9d487392016-11-02 14:16:49 +11001237 pr_warn("md: invalid raid superblock magic on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 goto abort;
1239 }
1240
1241 if (sb->major_version != 0 ||
NeilBrownf6705572006-03-27 01:18:11 -08001242 sb->minor_version < 90 ||
1243 sb->minor_version > 91) {
NeilBrown9d487392016-11-02 14:16:49 +11001244 pr_warn("Bad version number %d.%d on %s\n",
1245 sb->major_version, sb->minor_version, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 goto abort;
1247 }
1248
1249 if (sb->raid_disks <= 0)
1250 goto abort;
1251
NeilBrown4d167f02007-05-09 02:35:37 -07001252 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
NeilBrown9d487392016-11-02 14:16:49 +11001253 pr_warn("md: invalid superblock checksum on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 goto abort;
1255 }
1256
1257 rdev->preferred_minor = sb->md_minor;
1258 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10001259 rdev->new_data_offset = 0;
NeilBrown0002b272005-09-09 16:23:53 -07001260 rdev->sb_size = MD_SB_BYTES;
NeilBrown9f2f3832011-07-28 11:31:47 +10001261 rdev->badblocks.shift = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262
1263 if (sb->level == LEVEL_MULTIPATH)
1264 rdev->desc_nr = -1;
1265 else
1266 rdev->desc_nr = sb->this_disk.number;
1267
Yufen Yu228fc7d2019-10-30 18:47:02 +08001268	/* not a spare disk, or LEVEL_MULTIPATH */
1269 if (sb->level == LEVEL_MULTIPATH ||
1270 (rdev->desc_nr >= 0 &&
Yufen Yu3b7436c2019-12-10 15:01:29 +08001271 rdev->desc_nr < MD_SB_DISKS &&
Yufen Yu228fc7d2019-10-30 18:47:02 +08001272 sb->disks[rdev->desc_nr].state &
1273 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1274 spare_disk = false;
1275
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001276 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001277 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001278 ret = 1;
1279 else
1280 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001281 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001283 mdp_super_t *refsb = page_address(refdev->sb_page);
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001284 if (!md_uuid_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001285 pr_warn("md: %s has different UUID to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 b, bdevname(refdev->bdev,b2));
1287 goto abort;
1288 }
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001289 if (!md_sb_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001290 pr_warn("md: %s has same UUID but different superblock to %s\n",
1291 b, bdevname(refdev->bdev, b2));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 goto abort;
1293 }
1294 ev1 = md_event(sb);
1295 ev2 = md_event(refsb);
Yufen Yu6a5cb532019-10-16 16:00:03 +08001296
Yufen Yu228fc7d2019-10-30 18:47:02 +08001297 if (!spare_disk && ev1 > ev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 ret = 1;
NeilBrownf72ffdd2014-09-30 14:23:59 +10001299 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 ret = 0;
1301 }
NeilBrown8190e752009-06-18 08:48:58 +10001302 rdev->sectors = rdev->sb_start;
NeilBrown667a5312012-08-16 16:46:12 +10001303 /* Limit to 4TB as metadata cannot record more than that.
1304 * (not needed for Linear and RAID0 as metadata doesn't
1305 * record this size)
1306 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001307 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001308 rdev->sectors = (sector_t)(2ULL << 32) - 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309
NeilBrown27a7b262011-09-10 17:21:28 +10001310 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
NeilBrown2bf071b2006-01-06 00:20:55 -08001311 /* "this cannot possibly happen" ... */
1312 ret = -EINVAL;
1313
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 abort:
1315 return ret;
1316}
1317
1318/*
1319 * validate_super for 0.90.0
1320 */
NeilBrownfd01b882011-10-11 16:47:53 +11001321static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322{
1323 mdp_disk_t *desc;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001324 mdp_super_t *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001325 __u64 ev1 = md_event(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326
NeilBrown41158c72005-06-21 17:17:25 -07001327 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001328 clear_bit(Faulty, &rdev->flags);
1329 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001330 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001331 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001332
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 if (mddev->raid_disks == 0) {
1334 mddev->major_version = 0;
1335 mddev->minor_version = sb->minor_version;
1336 mddev->patch_version = sb->patch_version;
NeilBrowne6910632008-02-06 01:39:51 -08001337 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001338 mddev->chunk_sectors = sb->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 mddev->ctime = sb->ctime;
1340 mddev->utime = sb->utime;
1341 mddev->level = sb->level;
NeilBrownd9d166c2006-01-06 00:20:51 -08001342 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 mddev->layout = sb->layout;
1344 mddev->raid_disks = sb->raid_disks;
NeilBrown27a7b262011-09-10 17:21:28 +10001345 mddev->dev_sectors = ((sector_t)sb->size) * 2;
NeilBrown07d84d102006-06-26 00:27:56 -07001346 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001347 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001348 mddev->bitmap_info.space = 0;
 1349	/* bitmap can use 60K (64K minus the 4K superblock) */
NeilBrownc3d97142009-12-14 12:49:52 +11001350 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001351 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrown2c810cd2012-05-21 09:27:00 +10001352 mddev->reshape_backwards = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
NeilBrownf6705572006-03-27 01:18:11 -08001354 if (mddev->minor_version >= 91) {
1355 mddev->reshape_position = sb->reshape_position;
1356 mddev->delta_disks = sb->delta_disks;
1357 mddev->new_level = sb->new_level;
1358 mddev->new_layout = sb->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001359 mddev->new_chunk_sectors = sb->new_chunk >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001360 if (mddev->delta_disks < 0)
1361 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001362 } else {
1363 mddev->reshape_position = MaxSector;
1364 mddev->delta_disks = 0;
1365 mddev->new_level = mddev->level;
1366 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001367 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001368 }
NeilBrown33f2c352019-09-09 16:52:29 +10001369 if (mddev->level == 0)
1370 mddev->layout = -1;
NeilBrownf6705572006-03-27 01:18:11 -08001371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 if (sb->state & (1<<MD_SB_CLEAN))
1373 mddev->recovery_cp = MaxSector;
1374 else {
NeilBrownf72ffdd2014-09-30 14:23:59 +10001375 if (sb->events_hi == sb->cp_events_hi &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 sb->events_lo == sb->cp_events_lo) {
1377 mddev->recovery_cp = sb->recovery_cp;
1378 } else
1379 mddev->recovery_cp = 0;
1380 }
1381
1382 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1383 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1384 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1385 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1386
1387 mddev->max_disks = MD_SB_DISKS;
NeilBrowna654b9d82005-06-21 17:17:27 -07001388
1389 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001390 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001391 mddev->bitmap_info.offset =
1392 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10001393 mddev->bitmap_info.space =
Dave Jonesc9ad0202013-08-19 22:26:32 -04001394 mddev->bitmap_info.default_space;
NeilBrown6409bb02012-05-22 13:55:07 +10001395 }
NeilBrowna654b9d82005-06-21 17:17:27 -07001396
NeilBrown41158c72005-06-21 17:17:25 -07001397 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001398 /* Insist on good event counter while assembling, except
1399 * for spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001401 if (sb->disks[rdev->desc_nr].state & (
1402 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
NeilBrownf72ffdd2014-09-30 14:23:59 +10001403 if (ev1 < mddev->events)
NeilBrownbe6800a2010-05-18 10:17:09 +10001404 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001405 } else if (mddev->bitmap) {
 1406	/* If adding to an array with a bitmap, then we can accept an
 1407	 * older device ... but not too old.
 1408	 */
NeilBrown41158c72005-06-21 17:17:25 -07001409 if (ev1 < mddev->bitmap->events_cleared)
1410 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001411 if (ev1 < mddev->events)
1412 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001413 } else {
1414 if (ev1 < mddev->events)
1415 /* just a hot-add of a new device, leave raid_disk at -1 */
1416 return 0;
1417 }
NeilBrown41158c72005-06-21 17:17:25 -07001418
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 if (mddev->level != LEVEL_MULTIPATH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 desc = sb->disks + rdev->desc_nr;
1421
1422 if (desc->state & (1<<MD_DISK_FAULTY))
NeilBrownb2d444d2005-11-08 21:39:31 -08001423 set_bit(Faulty, &rdev->flags);
NeilBrown7c7546c2006-06-26 00:27:41 -07001424 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1425 desc->raid_disk < mddev->raid_disks */) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001426 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427 rdev->raid_disk = desc->raid_disk;
NeilBrownf4667222013-12-09 12:04:56 +11001428 rdev->saved_raid_disk = desc->raid_disk;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001429 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1430 /* active but not in sync implies recovery up to
1431 * reshape position. We don't know exactly where
1432 * that is, so set to zero for now */
1433 if (mddev->minor_version >= 91) {
1434 rdev->recovery_offset = 0;
1435 rdev->raid_disk = desc->raid_disk;
1436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001438 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1439 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001440 if (desc->state & (1<<MD_DISK_FAILFAST))
1441 set_bit(FailFast, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001442 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001443 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 return 0;
1445}
1446
1447/*
1448 * sync_super for 0.90.0
1449 */
NeilBrownfd01b882011-10-11 16:47:53 +11001450static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451{
1452 mdp_super_t *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001453 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 int next_spare = mddev->raid_disks;
NeilBrown19133a42005-11-08 21:39:35 -08001455
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456	/* make rdev->sb match mddev data:
1457 *
1458 * 1/ zero out disks
1459 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1460 * 3/ any empty disks < next_spare become removed
1461 *
1462 * disks[0] gets initialised to REMOVED because
1463 * we cannot be sure from other fields if it has
1464 * been initialised or not.
1465 */
1466 int i;
1467 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1468
NeilBrown61181562005-09-09 16:24:02 -07001469 rdev->sb_size = MD_SB_BYTES;
1470
Namhyung Kim65a06f062011-07-27 11:00:36 +10001471 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472
1473 memset(sb, 0, sizeof(*sb));
1474
1475 sb->md_magic = MD_SB_MAGIC;
1476 sb->major_version = mddev->major_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477 sb->patch_version = mddev->patch_version;
1478 sb->gvalid_words = 0; /* ignored */
1479 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1480 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1481 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1482 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1483
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001484 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 sb->level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11001486 sb->size = mddev->dev_sectors / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 sb->raid_disks = mddev->raid_disks;
1488 sb->md_minor = mddev->md_minor;
NeilBrowne6910632008-02-06 01:39:51 -08001489 sb->not_persistent = 0;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001490 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 sb->state = 0;
1492 sb->events_hi = (mddev->events>>32);
1493 sb->events_lo = (u32)mddev->events;
1494
NeilBrownf6705572006-03-27 01:18:11 -08001495 if (mddev->reshape_position == MaxSector)
1496 sb->minor_version = 90;
1497 else {
1498 sb->minor_version = 91;
1499 sb->reshape_position = mddev->reshape_position;
1500 sb->new_level = mddev->new_level;
1501 sb->delta_disks = mddev->delta_disks;
1502 sb->new_layout = mddev->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001503 sb->new_chunk = mddev->new_chunk_sectors << 9;
NeilBrownf6705572006-03-27 01:18:11 -08001504 }
1505 mddev->minor_version = sb->minor_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506 if (mddev->in_sync)
1507 {
1508 sb->recovery_cp = mddev->recovery_cp;
1509 sb->cp_events_hi = (mddev->events>>32);
1510 sb->cp_events_lo = (u32)mddev->events;
1511 if (mddev->recovery_cp == MaxSector)
1512 sb->state = (1<< MD_SB_CLEAN);
1513 } else
1514 sb->recovery_cp = 0;
1515
1516 sb->layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10001517 sb->chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
NeilBrownc3d97142009-12-14 12:49:52 +11001519 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
NeilBrowna654b9d82005-06-21 17:17:27 -07001520 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1521
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 sb->disks[0].state = (1<<MD_DISK_REMOVED);
NeilBrowndafb20f2012-03-19 12:46:39 +11001523 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 mdp_disk_t *d;
NeilBrown86e6ffd2005-11-08 21:39:24 -08001525 int desc_nr;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001526 int is_active = test_bit(In_sync, &rdev2->flags);
1527
1528 if (rdev2->raid_disk >= 0 &&
1529 sb->minor_version >= 91)
1530 /* we have nowhere to store the recovery_offset,
1531 * but if it is not below the reshape_position,
1532 * we can piggy-back on that.
1533 */
1534 is_active = 1;
1535 if (rdev2->raid_disk < 0 ||
1536 test_bit(Faulty, &rdev2->flags))
1537 is_active = 0;
1538 if (is_active)
NeilBrown86e6ffd2005-11-08 21:39:24 -08001539 desc_nr = rdev2->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 else
NeilBrown86e6ffd2005-11-08 21:39:24 -08001541 desc_nr = next_spare++;
NeilBrown19133a42005-11-08 21:39:35 -08001542 rdev2->desc_nr = desc_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 d = &sb->disks[rdev2->desc_nr];
1544 nr_disks++;
1545 d->number = rdev2->desc_nr;
1546 d->major = MAJOR(rdev2->bdev->bd_dev);
1547 d->minor = MINOR(rdev2->bdev->bd_dev);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001548 if (is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 d->raid_disk = rdev2->raid_disk;
1550 else
1551 d->raid_disk = rdev2->desc_nr; /* compatibility */
NeilBrown1be78922006-03-27 01:18:03 -08001552 if (test_bit(Faulty, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 d->state = (1<<MD_DISK_FAULTY);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001554 else if (is_active) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 d->state = (1<<MD_DISK_ACTIVE);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001556 if (test_bit(In_sync, &rdev2->flags))
1557 d->state |= (1<<MD_DISK_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 active++;
1559 working++;
1560 } else {
1561 d->state = 0;
1562 spare++;
1563 working++;
1564 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001565 if (test_bit(WriteMostly, &rdev2->flags))
1566 d->state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11001567 if (test_bit(FailFast, &rdev2->flags))
1568 d->state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 /* now set the "removed" and "faulty" bits on any missing devices */
1571 for (i=0 ; i < mddev->raid_disks ; i++) {
1572 mdp_disk_t *d = &sb->disks[i];
1573 if (d->state == 0 && d->number == 0) {
1574 d->number = i;
1575 d->raid_disk = i;
1576 d->state = (1<<MD_DISK_REMOVED);
1577 d->state |= (1<<MD_DISK_FAULTY);
1578 failed++;
1579 }
1580 }
1581 sb->nr_disks = nr_disks;
1582 sb->active_disks = active;
1583 sb->working_disks = working;
1584 sb->failed_disks = failed;
1585 sb->spare_disks = spare;
1586
1587 sb->this_disk = sb->disks[rdev->desc_nr];
1588 sb->sb_csum = calc_sb_csum(sb);
1589}
1590
1591/*
Chris Webb0cd17fe2008-06-28 08:31:46 +10001592 * rdev_size_change for 0.90.0
1593 */
1594static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11001595super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001596{
Andre Noll58c0fed2009-03-31 14:33:13 +11001597 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001598 return 0; /* component must fit device */
NeilBrownc3d97142009-12-14 12:49:52 +11001599 if (rdev->mddev->bitmap_info.offset)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001600 return 0; /* can't move bitmap */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001601 rdev->sb_start = calc_dev_sboffset(rdev);
Andre Noll15f4a5f2008-07-21 14:42:12 +10001602 if (!num_sectors || num_sectors > rdev->sb_start)
1603 num_sectors = rdev->sb_start;
NeilBrown27a7b262011-09-10 17:21:28 +10001604 /* Limit to 4TB as metadata cannot record more than that.
1605 * 4TB == 2^32 KB, or 2*2^32 sectors.
1606 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001607 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001608 num_sectors = (sector_t)(2ULL << 32) - 2;
NeilBrown46533ff2016-11-18 16:16:11 +11001609 do {
1610 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001611 rdev->sb_page);
NeilBrown46533ff2016-11-18 16:16:11 +11001612 } while (md_super_wait(rdev->mddev) < 0);
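	/* the loop above re-issues the superblock write until
	 * md_super_wait() stops reporting a failed write
	 */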
Justin Maggardc26a44e2010-11-24 16:36:17 +11001613 return num_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10001614}
1615
NeilBrownc6563a82012-05-21 09:27:00 +10001616static int
1617super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1618{
1619 /* non-zero offset changes not possible with v0.90 */
1620 return new_offset == 0;
1621}
Chris Webb0cd17fe2008-06-28 08:31:46 +10001622
1623/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 * version 1 superblock
1625 */
1626
NeilBrownf72ffdd2014-09-30 14:23:59 +10001627static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628{
NeilBrown1c05b4b2006-10-21 10:24:08 -07001629 __le32 disk_csum;
1630 u32 csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 unsigned long long newcsum;
1632 int size = 256 + le32_to_cpu(sb->max_dev)*2;
NeilBrown1c05b4b2006-10-21 10:24:08 -07001633 __le32 *isuper = (__le32*)sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634
1635 disk_csum = sb->sb_csum;
1636 sb->sb_csum = 0;
1637 newcsum = 0;
NeilBrown1f3c9902012-12-11 13:09:00 +11001638 for (; size >= 4; size -= 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 newcsum += le32_to_cpu(*isuper++);
1640
1641 if (size == 2)
NeilBrown1c05b4b2006-10-21 10:24:08 -07001642 newcsum += le16_to_cpu(*(__le16*) isuper);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
1644 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1645 sb->sb_csum = disk_csum;
1646 return cpu_to_le32(csum);
1647}
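
/*
 * Note on calc_sb_1_csum(): "size" is in bytes - 256 for the fixed part
 * of struct mdp_superblock_1 plus two bytes per dev_roles[] entry - so
 * the trailing size == 2 case picks up a final __le16 role when the
 * byte count is not a multiple of four.  Because every word is summed
 * via le32_to_cpu(), the result is the same on all architectures,
 * unlike the host-endian 0.90 checksum above.
 */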
1648
NeilBrown3cb03002011-10-11 16:45:26 +11001649static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650{
1651 struct mdp_superblock_1 *sb;
1652 int ret;
Andre Noll0f420352008-07-11 22:02:23 +10001653 sector_t sb_start;
NeilBrownc6563a82012-05-21 09:27:00 +10001654 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown0002b272005-09-09 16:23:53 -07001656 int bmask;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001657 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
1659 /*
Andre Noll0f420352008-07-11 22:02:23 +10001660	 * Calculate the position of the superblock in 512-byte sectors.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661	 * It is always aligned to a 4K boundary and,
 1662	 * depending on minor_version, it can be:
1663 * 0: At least 8K, but less than 12K, from end of device
1664 * 1: At start of device
1665 * 2: 4K from start of device.
1666 */
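	/*
	 * Worked example for minor_version 0 (illustrative numbers): on
	 * a 1000000-sector device, sb_start becomes 1000000 - 16 =
	 * 999984, already 8-sector aligned, leaving the superblock 8K
	 * from the end - within the 8K..12K window described above.
	 */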
1667 switch(minor_version) {
1668 case 0:
Mike Snitzer77304d22010-11-08 14:39:12 +01001669 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll0f420352008-07-11 22:02:23 +10001670 sb_start -= 8*2;
1671 sb_start &= ~(sector_t)(4*2-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 break;
1673 case 1:
Andre Noll0f420352008-07-11 22:02:23 +10001674 sb_start = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 break;
1676 case 2:
Andre Noll0f420352008-07-11 22:02:23 +10001677 sb_start = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 break;
1679 default:
1680 return -EINVAL;
1681 }
Andre Noll0f420352008-07-11 22:02:23 +10001682 rdev->sb_start = sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
NeilBrown0002b272005-09-09 16:23:53 -07001684 /* superblock is rarely larger than 1K, but it can be larger,
 1685	 * and it is safe to read 4K, so we do that
1686 */
1687 ret = read_disk_sb(rdev, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 if (ret) return ret;
1689
Namhyung Kim65a06f062011-07-27 11:00:36 +10001690 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
1692 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1693 sb->major_version != cpu_to_le32(1) ||
1694 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
Andre Noll0f420352008-07-11 22:02:23 +10001695 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
NeilBrown71c08052005-09-09 16:23:51 -07001696 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 return -EINVAL;
1698
1699 if (calc_sb_1_csum(sb) != sb->sb_csum) {
NeilBrown9d487392016-11-02 14:16:49 +11001700 pr_warn("md: invalid superblock checksum on %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 bdevname(rdev->bdev,b));
1702 return -EINVAL;
1703 }
1704 if (le64_to_cpu(sb->data_size) < 10) {
NeilBrown9d487392016-11-02 14:16:49 +11001705 pr_warn("md: data_size too small on %s\n",
1706 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 return -EINVAL;
1708 }
NeilBrownc6563a82012-05-21 09:27:00 +10001709 if (sb->pad0 ||
1710 sb->pad3[0] ||
1711 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1712 /* Some padding is non-zero, might be a new feature */
1713 return -EINVAL;
NeilBrowne11e93f2007-05-09 02:35:36 -07001714
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 rdev->preferred_minor = 0xffff;
1716 rdev->data_offset = le64_to_cpu(sb->data_offset);
NeilBrownc6563a82012-05-21 09:27:00 +10001717 rdev->new_data_offset = rdev->data_offset;
1718 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1719 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1720 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
NeilBrown4dbcdc72006-01-06 00:20:52 -08001721 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
NeilBrown0002b272005-09-09 16:23:53 -07001723 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
Martin K. Petersene1defc42009-05-22 17:17:49 -04001724 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
NeilBrown0002b272005-09-09 16:23:53 -07001725 if (rdev->sb_size & bmask)
NeilBrowna1801f82008-03-04 14:29:31 -08001726 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1727
1728 if (minor_version
Andre Noll0f420352008-07-11 22:02:23 +10001729 && rdev->data_offset < sb_start + (rdev->sb_size/512))
NeilBrowna1801f82008-03-04 14:29:31 -08001730 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10001731 if (minor_version
1732 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1733 return -EINVAL;
NeilBrown0002b272005-09-09 16:23:53 -07001734
NeilBrown31b65a02006-07-10 04:44:14 -07001735 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1736 rdev->desc_nr = -1;
1737 else
1738 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1739
NeilBrown2699b672011-07-28 11:31:47 +10001740 if (!rdev->bb_page) {
1741 rdev->bb_page = alloc_page(GFP_KERNEL);
1742 if (!rdev->bb_page)
1743 return -ENOMEM;
1744 }
1745 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1746 rdev->badblocks.count == 0) {
1747 /* need to load the bad block list.
1748 * Currently we limit it to one page.
1749 */
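		/*
		 * Each entry decoded below is a single __le64: the high
		 * 54 bits hold the start and the low 10 bits the length,
		 * both in units of 2^bblog_shift sectors; a word of all
		 * ones (bb + 1 == 0) ends the list, matching the 0xff
		 * fill that super_1_sync() writes.
		 */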
1750 s32 offset;
1751 sector_t bb_sector;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001752 __le64 *bbp;
NeilBrown2699b672011-07-28 11:31:47 +10001753 int i;
1754 int sectors = le16_to_cpu(sb->bblog_size);
1755 if (sectors > (PAGE_SIZE / 512))
1756 return -EINVAL;
1757 offset = le32_to_cpu(sb->bblog_offset);
1758 if (offset == 0)
1759 return -EINVAL;
1760 bb_sector = (long long)offset;
1761 if (!sync_page_io(rdev, bb_sector, sectors << 9,
Mike Christie796a5cf2016-06-05 14:32:07 -05001762 rdev->bb_page, REQ_OP_READ, 0, true))
NeilBrown2699b672011-07-28 11:31:47 +10001763 return -EIO;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001764 bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10001765 rdev->badblocks.shift = sb->bblog_shift;
1766 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1767 u64 bb = le64_to_cpu(*bbp);
1768 int count = bb & (0x3ff);
1769 u64 sector = bb >> 10;
1770 sector <<= sb->bblog_shift;
1771 count <<= sb->bblog_shift;
1772 if (bb + 1 == 0)
1773 break;
Vishal Vermafc974ee2015-12-24 19:20:34 -07001774 if (badblocks_set(&rdev->badblocks, sector, count, 1))
NeilBrown2699b672011-07-28 11:31:47 +10001775 return -EINVAL;
1776 }
NeilBrown486adf72013-04-24 11:42:44 +10001777 } else if (sb->bblog_offset != 0)
1778 rdev->badblocks.shift = 0;
NeilBrown2699b672011-07-28 11:31:47 +10001779
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001780 if ((le32_to_cpu(sb->feature_map) &
1781 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001782 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1783 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1784 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1785 }
1786
NeilBrown33f2c352019-09-09 16:52:29 +10001787 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1788 sb->level != 0)
1789 return -EINVAL;
1790
Yufen Yu228fc7d2019-10-30 18:47:02 +08001791	/* not a spare disk, or LEVEL_MULTIPATH */
1792 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1793 (rdev->desc_nr >= 0 &&
1794 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1795 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1796 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1797 spare_disk = false;
Yufen Yu6a5cb532019-10-16 16:00:03 +08001798
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001799 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001800 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001801 ret = 1;
1802 else
1803 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001804 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001806 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
1808 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1809 sb->level != refsb->level ||
1810 sb->layout != refsb->layout ||
1811 sb->chunksize != refsb->chunksize) {
NeilBrown9d487392016-11-02 14:16:49 +11001812 pr_warn("md: %s has strangely different superblock to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 bdevname(rdev->bdev,b),
1814 bdevname(refdev->bdev,b2));
1815 return -EINVAL;
1816 }
1817 ev1 = le64_to_cpu(sb->events);
1818 ev2 = le64_to_cpu(refsb->events);
1819
Yufen Yu228fc7d2019-10-30 18:47:02 +08001820 if (!spare_disk && ev1 > ev2)
NeilBrown8ed75462006-02-03 03:03:41 -08001821 ret = 1;
1822 else
1823 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 }
NeilBrownc6563a82012-05-21 09:27:00 +10001825 if (minor_version) {
1826 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1827 sectors -= rdev->data_offset;
1828 } else
1829 sectors = rdev->sb_start;
1830 if (sectors < le64_to_cpu(sb->data_size))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 return -EINVAL;
Andre Nolldd8ac332009-03-31 14:33:13 +11001832 rdev->sectors = le64_to_cpu(sb->data_size);
NeilBrown8ed75462006-02-03 03:03:41 -08001833 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834}
1835
NeilBrownfd01b882011-10-11 16:47:53 +11001836static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837{
Namhyung Kim65a06f062011-07-27 11:00:36 +10001838 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001839 __u64 ev1 = le64_to_cpu(sb->events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840
NeilBrown41158c72005-06-21 17:17:25 -07001841 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001842 clear_bit(Faulty, &rdev->flags);
1843 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001844 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001845 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001846
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 if (mddev->raid_disks == 0) {
1848 mddev->major_version = 1;
1849 mddev->patch_version = 0;
NeilBrowne6910632008-02-06 01:39:51 -08001850 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001851 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001852 mddev->ctime = le64_to_cpu(sb->ctime);
1853 mddev->utime = le64_to_cpu(sb->utime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 mddev->level = le32_to_cpu(sb->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08001855 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 mddev->layout = le32_to_cpu(sb->layout);
1857 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001858 mddev->dev_sectors = le64_to_cpu(sb->size);
NeilBrown07d84d102006-06-26 00:27:56 -07001859 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001860 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001861 mddev->bitmap_info.space = 0;
 1862	/* Default location for the bitmap is 1K after the superblock,
 1863	 * using 3K - a total of 4K
 1864	 */
NeilBrownc3d97142009-12-14 12:49:52 +11001865 mddev->bitmap_info.default_offset = 1024 >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001866 mddev->bitmap_info.default_space = (4096-1024) >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001867 mddev->reshape_backwards = 0;
1868
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1870 memcpy(mddev->uuid, sb->set_uuid, 16);
1871
1872 mddev->max_disks = (4096-256)/2;
NeilBrowna654b9d82005-06-21 17:17:27 -07001873
NeilBrown71c08052005-09-09 16:23:51 -07001874 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001875 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001876 mddev->bitmap_info.offset =
1877 (__s32)le32_to_cpu(sb->bitmap_offset);
NeilBrown6409bb02012-05-22 13:55:07 +10001878 /* Metadata doesn't record how much space is available.
1879 * For 1.0, we assume we can use up to the superblock
1880 * if before, else to 4K beyond superblock.
1881 * For others, assume no change is possible.
1882 */
1883 if (mddev->minor_version > 0)
1884 mddev->bitmap_info.space = 0;
1885 else if (mddev->bitmap_info.offset > 0)
1886 mddev->bitmap_info.space =
1887 8 - mddev->bitmap_info.offset;
1888 else
1889 mddev->bitmap_info.space =
1890 -mddev->bitmap_info.offset;
1891 }
NeilBrowne11e93f2007-05-09 02:35:36 -07001892
NeilBrownf6705572006-03-27 01:18:11 -08001893 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1894 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1895 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1896 mddev->new_level = le32_to_cpu(sb->new_level);
1897 mddev->new_layout = le32_to_cpu(sb->new_layout);
Andre Noll664e7c42009-06-18 08:45:27 +10001898 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
NeilBrown2c810cd2012-05-21 09:27:00 +10001899 if (mddev->delta_disks < 0 ||
1900 (mddev->delta_disks == 0 &&
1901 (le32_to_cpu(sb->feature_map)
1902 & MD_FEATURE_RESHAPE_BACKWARDS)))
1903 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001904 } else {
1905 mddev->reshape_position = MaxSector;
1906 mddev->delta_disks = 0;
1907 mddev->new_level = mddev->level;
1908 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001909 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001910 }
1911
NeilBrown33f2c352019-09-09 16:52:29 +10001912 if (mddev->level == 0 &&
1913 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1914 mddev->layout = -1;
1915
Song Liu486b0f72016-08-19 15:34:01 -07001916 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
Shaohua Lia62ab492016-01-06 14:37:13 -08001917 set_bit(MD_HAS_JOURNAL, &mddev->flags);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001918
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001919 if (le32_to_cpu(sb->feature_map) &
1920 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001921 if (le32_to_cpu(sb->feature_map) &
1922 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1923 return -EINVAL;
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001924 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1925 (le32_to_cpu(sb->feature_map) &
1926 MD_FEATURE_MULTIPLE_PPLS))
1927 return -EINVAL;
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001928 set_bit(MD_HAS_PPL, &mddev->flags);
1929 }
NeilBrown41158c72005-06-21 17:17:25 -07001930 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001931	/* Insist on good event counter while assembling, except for
1932 * spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001934 if (rdev->desc_nr >= 0 &&
1935 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
Song Liua3dfbda2015-10-08 21:54:11 -07001936 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1937 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
NeilBrownbe6800a2010-05-18 10:17:09 +10001938 if (ev1 < mddev->events)
1939 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001940 } else if (mddev->bitmap) {
 1941	/* If adding to an array with a bitmap, then we can accept an
1942 * older device, but not too old.
1943 */
NeilBrown41158c72005-06-21 17:17:25 -07001944 if (ev1 < mddev->bitmap->events_cleared)
1945 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001946 if (ev1 < mddev->events)
1947 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001948 } else {
1949 if (ev1 < mddev->events)
1950 /* just a hot-add of a new device, leave raid_disk at -1 */
1951 return 0;
1952 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 if (mddev->level != LEVEL_MULTIPATH) {
1954 int role;
NeilBrown3673f302009-08-03 10:59:56 +10001955 if (rdev->desc_nr < 0 ||
1956 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
Song Liuc4d4c912015-08-13 14:31:54 -07001957 role = MD_DISK_ROLE_SPARE;
NeilBrown3673f302009-08-03 10:59:56 +10001958 rdev->desc_nr = -1;
1959 } else
1960 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 switch(role) {
Song Liuc4d4c912015-08-13 14:31:54 -07001962 case MD_DISK_ROLE_SPARE: /* spare */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 break;
Song Liuc4d4c912015-08-13 14:31:54 -07001964 case MD_DISK_ROLE_FAULTY: /* faulty */
NeilBrownb2d444d2005-11-08 21:39:31 -08001965 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 break;
Song Liubac624f2015-08-13 14:31:55 -07001967 case MD_DISK_ROLE_JOURNAL: /* journal device */
1968 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1969 /* journal device without journal feature */
NeilBrown9d487392016-11-02 14:16:49 +11001970 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
Song Liubac624f2015-08-13 14:31:55 -07001971 return -EINVAL;
1972 }
1973 set_bit(Journal, &rdev->flags);
Shaohua Li3069aa82015-08-13 14:31:56 -07001974 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
Shaohua Li9b156032015-12-18 15:19:16 +11001975 rdev->raid_disk = 0;
Song Liubac624f2015-08-13 14:31:55 -07001976 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 default:
NeilBrownf4667222013-12-09 12:04:56 +11001978 rdev->saved_raid_disk = role;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001979 if ((le32_to_cpu(sb->feature_map) &
NeilBrownf4667222013-12-09 12:04:56 +11001980 MD_FEATURE_RECOVERY_OFFSET)) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001981 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11001982 if (!(le32_to_cpu(sb->feature_map) &
1983 MD_FEATURE_RECOVERY_BITMAP))
1984 rdev->saved_raid_disk = -1;
Guoqing Jiang062f5b2a2019-07-24 11:09:20 +02001985 } else {
1986 /*
1987 * If the array is FROZEN, then the device can't
1988 * be in_sync with rest of array.
1989 */
1990 if (!test_bit(MD_RECOVERY_FROZEN,
1991 &mddev->recovery))
1992 set_bit(In_sync, &rdev->flags);
1993 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 rdev->raid_disk = role;
1995 break;
1996 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001997 if (sb->devflags & WriteMostly1)
1998 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001999 if (sb->devflags & FailFast1)
2000 set_bit(FailFast, &rdev->flags);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002001 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
2002 set_bit(Replacement, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07002003 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08002004 set_bit(In_sync, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07002005
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 return 0;
2007}
2008
NeilBrownfd01b882011-10-11 16:47:53 +11002009static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010{
2011 struct mdp_superblock_1 *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11002012 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 int max_dev, i;
2014 /* make rdev->sb match mddev and rdev data. */
2015
Namhyung Kim65a06f062011-07-27 11:00:36 +10002016 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 sb->feature_map = 0;
2019 sb->pad0 = 0;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002020 sb->recovery_offset = cpu_to_le64(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 memset(sb->pad3, 0, sizeof(sb->pad3));
2022
2023 sb->utime = cpu_to_le64((__u64)mddev->utime);
2024 sb->events = cpu_to_le64(mddev->events);
2025 if (mddev->in_sync)
2026 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
Shaohua Libd18f642015-09-02 13:49:50 -07002027 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2028 sb->resync_offset = cpu_to_le64(MaxSector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 else
2030 sb->resync_offset = cpu_to_le64(0);
2031
NeilBrown1c05b4b2006-10-21 10:24:08 -07002032 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
NeilBrown4dbcdc72006-01-06 00:20:52 -08002033
NeilBrownf0ca3402006-02-02 14:28:04 -08002034 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11002035 sb->size = cpu_to_le64(mddev->dev_sectors);
Andre Noll9d8f0362009-06-18 08:45:01 +10002036 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
NeilBrown62e1e382009-05-26 09:40:59 +10002037 sb->level = cpu_to_le32(mddev->level);
2038 sb->layout = cpu_to_le32(mddev->layout);
NeilBrown688834e2016-11-18 16:16:11 +11002039 if (test_bit(FailFast, &rdev->flags))
2040 sb->devflags |= FailFast1;
2041 else
2042 sb->devflags &= ~FailFast1;
NeilBrownf0ca3402006-02-02 14:28:04 -08002043
NeilBrownaeb9b2112011-08-25 14:43:08 +10002044 if (test_bit(WriteMostly, &rdev->flags))
2045 sb->devflags |= WriteMostly1;
2046 else
2047 sb->devflags &= ~WriteMostly1;
NeilBrownc6563a82012-05-21 09:27:00 +10002048 sb->data_offset = cpu_to_le64(rdev->data_offset);
2049 sb->data_size = cpu_to_le64(rdev->sectors);
NeilBrownaeb9b2112011-08-25 14:43:08 +10002050
NeilBrownc3d97142009-12-14 12:49:52 +11002051 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2052 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
NeilBrown71c08052005-09-09 16:23:51 -07002053 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
NeilBrowna654b9d82005-06-21 17:17:27 -07002054 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002055
Shaohua Lif2076e72015-10-08 21:54:12 -07002056 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
NeilBrown97e4f422009-03-31 14:33:13 +11002057 !test_bit(In_sync, &rdev->flags)) {
NeilBrown93be75f2009-12-14 12:50:06 +11002058 sb->feature_map |=
2059 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2060 sb->recovery_offset =
2061 cpu_to_le64(rdev->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11002062 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2063 sb->feature_map |=
2064 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002065 }
Shaohua Li3069aa82015-08-13 14:31:56 -07002066 /* Note: recovery_offset and journal_tail share space */
2067 if (test_bit(Journal, &rdev->flags))
2068 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002069 if (test_bit(Replacement, &rdev->flags))
2070 sb->feature_map |=
2071 cpu_to_le32(MD_FEATURE_REPLACEMENT);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002072
NeilBrownf6705572006-03-27 01:18:11 -08002073 if (mddev->reshape_position != MaxSector) {
2074 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2075 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2076 sb->new_layout = cpu_to_le32(mddev->new_layout);
2077 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2078 sb->new_level = cpu_to_le32(mddev->new_level);
Andre Noll664e7c42009-06-18 08:45:27 +10002079 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
NeilBrown2c810cd2012-05-21 09:27:00 +10002080 if (mddev->delta_disks == 0 &&
2081 mddev->reshape_backwards)
2082 sb->feature_map
2083 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
NeilBrownc6563a82012-05-21 09:27:00 +10002084 if (rdev->new_data_offset != rdev->data_offset) {
2085 sb->feature_map
2086 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2087 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2088 - rdev->data_offset));
2089 }
NeilBrownf6705572006-03-27 01:18:11 -08002090 }
NeilBrowna654b9d82005-06-21 17:17:27 -07002091
Goldwyn Rodrigues3c462c82015-08-19 07:35:54 +10002092 if (mddev_is_clustered(mddev))
2093 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2094
NeilBrown2699b672011-07-28 11:31:47 +10002095 if (rdev->badblocks.count == 0)
 2096	/* Nothing to do for bad blocks */ ;
2097 else if (sb->bblog_offset == 0)
2098 /* Cannot record bad blocks on this device */
2099 md_error(mddev, rdev);
2100 else {
2101 struct badblocks *bb = &rdev->badblocks;
Christoph Hellwigae506402019-04-04 18:56:13 +02002102 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10002103 u64 *p = bb->page;
2104 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2105 if (bb->changed) {
2106 unsigned seq;
2107
2108retry:
2109 seq = read_seqbegin(&bb->lock);
2110
2111 memset(bbp, 0xff, PAGE_SIZE);
2112
2113 for (i = 0 ; i < bb->count ; i++) {
majianpeng35f9ac22012-11-08 08:56:27 +08002114 u64 internal_bb = p[i];
NeilBrown2699b672011-07-28 11:31:47 +10002115 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2116 | BB_LEN(internal_bb));
majianpeng35f9ac22012-11-08 08:56:27 +08002117 bbp[i] = cpu_to_le64(store_bb);
NeilBrown2699b672011-07-28 11:31:47 +10002118 }
NeilBrownd0962932012-03-19 12:46:41 +11002119 bb->changed = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002120 if (read_seqretry(&bb->lock, seq))
2121 goto retry;
2122
2123 bb->sector = (rdev->sb_start +
2124 (int)le32_to_cpu(sb->bblog_offset));
2125 bb->size = le16_to_cpu(sb->bblog_size);
NeilBrown2699b672011-07-28 11:31:47 +10002126 }
2127 }
2128
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 max_dev = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11002130 rdev_for_each(rdev2, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 if (rdev2->desc_nr+1 > max_dev)
2132 max_dev = rdev2->desc_nr+1;
NeilBrowna778b732007-05-23 13:58:10 -07002133
NeilBrown70471da2009-08-03 10:59:57 +10002134 if (max_dev > le32_to_cpu(sb->max_dev)) {
2135 int bmask;
NeilBrowna778b732007-05-23 13:58:10 -07002136 sb->max_dev = cpu_to_le32(max_dev);
NeilBrown70471da2009-08-03 10:59:57 +10002137 rdev->sb_size = max_dev * 2 + 256;
2138 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2139 if (rdev->sb_size & bmask)
2140 rdev->sb_size = (rdev->sb_size | bmask) + 1;
NeilBrownddcf3522010-09-08 16:48:17 +10002141 } else
2142 max_dev = le32_to_cpu(sb->max_dev);
2143
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 for (i=0; i<max_dev;i++)
Lidong Zhong8df72022017-06-12 10:45:55 +08002145 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
NeilBrownf72ffdd2014-09-30 14:23:59 +10002146
Song Liua97b7892015-10-08 21:54:09 -07002147 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2148 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002150 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
Pawel Baldysiakddc08822017-08-16 17:13:45 +02002151 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2152 sb->feature_map |=
2153 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2154 else
2155 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002156 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2157 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2158 }
2159
NeilBrowndafb20f2012-03-19 12:46:39 +11002160 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 i = rdev2->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08002162 if (test_bit(Faulty, &rdev2->flags))
Song Liuc4d4c912015-08-13 14:31:54 -07002163 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08002164 else if (test_bit(In_sync, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Song Liua97b7892015-10-08 21:54:09 -07002166 else if (test_bit(Journal, &rdev2->flags))
Song Liubac624f2015-08-13 14:31:55 -07002167 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
NeilBrown93be75f2009-12-14 12:50:06 +11002168 else if (rdev2->raid_disk >= 0)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002169 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 else
Song Liuc4d4c912015-08-13 14:31:54 -07002171 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 }
2173
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 sb->sb_csum = calc_sb_1_csum(sb);
2175}
2176
Xiao Nid9c0fa52020-06-30 15:55:36 +08002177static sector_t super_1_choose_bm_space(sector_t dev_size)
2178{
2179 sector_t bm_space;
2180
 2181	/* If the device is bigger than 8GiB, save 64K for bitmap usage;
 2182	 * if bigger than 200GiB, save 128K; otherwise save 4K, or
 2183	 * nothing at all on devices smaller than 64K. */
2184 if (dev_size < 64*2)
2185 bm_space = 0;
2186 else if (dev_size - 64*2 >= 200*1024*1024*2)
2187 bm_space = 128*2;
2188 else if (dev_size - 4*2 > 8*1024*1024*2)
2189 bm_space = 64*2;
2190 else
2191 bm_space = 4*2;
2192 return bm_space;
2193}
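
/*
 * For example, a 16GiB component (33554432 sectors) lands in the
 * "bigger than 8GiB" case and reserves 64K (128 sectors), while a
 * 1GiB component reserves only the default 4K.
 */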
2194
Chris Webb0cd17fe2008-06-28 08:31:46 +10002195static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11002196super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002197{
2198 struct mdp_superblock_1 *sb;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002199 sector_t max_sectors;
Andre Noll58c0fed2009-03-31 14:33:13 +11002200 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002201 return 0; /* component must fit device */
NeilBrownc6563a82012-05-21 09:27:00 +10002202 if (rdev->data_offset != rdev->new_data_offset)
2203 return 0; /* too confusing */
Andre Noll0f420352008-07-11 22:02:23 +10002204 if (rdev->sb_start < rdev->data_offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002205 /* minor versions 1 and 2; superblock before data */
Mike Snitzer77304d22010-11-08 14:39:12 +01002206 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002207 max_sectors -= rdev->data_offset;
2208 if (!num_sectors || num_sectors > max_sectors)
2209 num_sectors = max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11002210 } else if (rdev->mddev->bitmap_info.offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002211 /* minor version 0 with bitmap we can't move */
2212 return 0;
2213 } else {
2214 /* minor version 0; superblock after data */
Xiao Nid9c0fa52020-06-30 15:55:36 +08002215 sector_t sb_start, bm_space;
2216 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2217
2218 /* 8K is for superblock */
2219 sb_start = dev_size - 8*2;
Andre Noll0f420352008-07-11 22:02:23 +10002220 sb_start &= ~(sector_t)(4*2 - 1);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002221
2222 bm_space = super_1_choose_bm_space(dev_size);
2223
2224 /* Space that can be used to store date needs to decrease
2225 * superblock bitmap space and bad block space(4K)
2226 */
2227 max_sectors = sb_start - bm_space - 4*2;
2228
Andre Noll15f4a5f2008-07-21 14:42:12 +10002229 if (!num_sectors || num_sectors > max_sectors)
2230 num_sectors = max_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002231 }
Namhyung Kim65a06f062011-07-27 11:00:36 +10002232 sb = page_address(rdev->sb_page);
Andre Noll15f4a5f2008-07-21 14:42:12 +10002233 sb->data_size = cpu_to_le64(num_sectors);
Jason Yan3fb632e2017-03-10 11:27:23 +08002234 sb->super_offset = cpu_to_le64(rdev->sb_start);
Chris Webb0cd17fe2008-06-28 08:31:46 +10002235 sb->sb_csum = calc_sb_1_csum(sb);
NeilBrown46533ff2016-11-18 16:16:11 +11002236 do {
2237 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2238 rdev->sb_page);
2239 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11002240 return num_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10002241
2242}
2243
2244static int
2245super_1_allow_new_offset(struct md_rdev *rdev,
2246 unsigned long long new_offset)
2247{
2248 /* All necessary checks on new >= old have been done */
2249 struct bitmap *bitmap;
2250 if (new_offset >= rdev->data_offset)
2251 return 1;
2252
2253 /* with 1.0 metadata, there is no metadata to tread on
2254 * so we can always move back */
2255 if (rdev->mddev->minor_version == 0)
2256 return 1;
2257
2258 /* otherwise we must be sure not to step on
2259 * any metadata, so stay:
2260 * 36K beyond start of superblock
2261 * beyond end of badblocks
2262 * beyond write-intent bitmap
2263 */
2264 if (rdev->sb_start + (32+4)*2 > new_offset)
2265 return 0;
2266 bitmap = rdev->mddev->bitmap;
2267 if (bitmap && !rdev->mddev->bitmap_info.file &&
2268 rdev->sb_start + rdev->mddev->bitmap_info.offset +
NeilBrown1ec885c2012-05-22 13:55:10 +10002269 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
NeilBrownc6563a82012-05-21 09:27:00 +10002270 return 0;
2271 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2272 return 0;
2273
2274 return 1;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002275}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Adrian Bunk75c96f82005-05-05 16:16:09 -07002277static struct super_type super_types[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 [0] = {
2279 .name = "0.90.0",
2280 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002281 .load_super = super_90_load,
2282 .validate_super = super_90_validate,
2283 .sync_super = super_90_sync,
2284 .rdev_size_change = super_90_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002285 .allow_new_offset = super_90_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 },
2287 [1] = {
2288 .name = "md-1",
2289 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002290 .load_super = super_1_load,
2291 .validate_super = super_1_validate,
2292 .sync_super = super_1_sync,
2293 .rdev_size_change = super_1_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002294 .allow_new_offset = super_1_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 },
2296};
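/*
 * Usage sketch (illustrative): every metadata operation is dispatched
 * through this table by major version, e.g.
 *
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 *
 * which is exactly what sync_super() below falls back to when no
 * cluster-provided ->sync_super override is installed.
 */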
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
NeilBrownfd01b882011-10-11 16:47:53 +11002298static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
Jonathan Brassow076f9682011-06-07 17:51:30 -05002299{
2300 if (mddev->sync_super) {
2301 mddev->sync_super(mddev, rdev);
2302 return;
2303 }
2304
2305 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2306
2307 super_types[mddev->major_version].sync_super(mddev, rdev);
2308}
2309
NeilBrownfd01b882011-10-11 16:47:53 +11002310static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311{
NeilBrown3cb03002011-10-11 16:45:26 +11002312 struct md_rdev *rdev, *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
NeilBrown4b809912008-07-21 17:05:25 +10002314 rcu_read_lock();
Song Liu0b020e82015-09-03 23:00:35 -07002315 rdev_for_each_rcu(rdev, mddev1) {
2316 if (test_bit(Faulty, &rdev->flags) ||
2317 test_bit(Journal, &rdev->flags) ||
2318 rdev->raid_disk == -1)
2319 continue;
2320 rdev_for_each_rcu(rdev2, mddev2) {
2321 if (test_bit(Faulty, &rdev2->flags) ||
2322 test_bit(Journal, &rdev2->flags) ||
2323 rdev2->raid_disk == -1)
2324 continue;
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02002325 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
NeilBrown4b809912008-07-21 17:05:25 +10002326 rcu_read_unlock();
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002327 return 1;
NeilBrown4b809912008-07-21 17:05:25 +10002328 }
Song Liu0b020e82015-09-03 23:00:35 -07002329 }
2330 }
NeilBrown4b809912008-07-21 17:05:25 +10002331 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332 return 0;
2333}
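/*
 * Illustrative note (not in the original source): the nested walk above
 * returns 1 as soon as any active member of mddev1 sits on the same
 * gendisk as an active member of mddev2, i.e. the two arrays would
 * share a physical device; faulty, journal and unassigned devices are
 * ignored on both sides.
 */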
2334
2335static LIST_HEAD(pending_raid_disks);
2336
Andre Nollac5e7112009-08-03 10:59:47 +10002337/*
2338 * Try to register data integrity profile for an mddev
2339 *
2340 * This is called when an array is started and after a disk has been kicked
2341 * from the array. It only succeeds if all working and active component devices
2342 * are integrity capable with matching profiles.
2343 */
NeilBrownfd01b882011-10-11 16:47:53 +11002344int md_integrity_register(struct mddev *mddev)
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002345{
NeilBrown3cb03002011-10-11 16:45:26 +11002346 struct md_rdev *rdev, *reference = NULL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002347
Andre Nollac5e7112009-08-03 10:59:47 +10002348 if (list_empty(&mddev->disks))
2349 return 0; /* nothing to do */
Jonathan Brassow629acb62011-06-08 15:10:08 +10002350 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2351 return 0; /* shouldn't register, or already is */
NeilBrowndafb20f2012-03-19 12:46:39 +11002352 rdev_for_each(rdev, mddev) {
Andre Nollac5e7112009-08-03 10:59:47 +10002353 /* skip spares and non-functional disks */
2354 if (test_bit(Faulty, &rdev->flags))
2355 continue;
2356 if (rdev->raid_disk < 0)
2357 continue;
Andre Nollac5e7112009-08-03 10:59:47 +10002358 if (!reference) {
2359 /* Use the first rdev as the reference */
2360 reference = rdev;
2361 continue;
2362 }
2363 /* does this rdev's profile match the reference profile? */
2364 if (blk_integrity_compare(reference->bdev->bd_disk,
2365 rdev->bdev->bd_disk) < 0)
2366 return -EINVAL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002367 }
Martin K. Petersen89078d52011-03-28 20:09:12 -04002368 if (!reference || !bdev_get_integrity(reference->bdev))
2369 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002370 /*
2371 * All component devices are integrity capable and have matching
2372 * profiles, register the common profile for the md device.
2373 */
Martin K. Petersen25520d52015-10-21 13:19:49 -04002374 blk_integrity_register(mddev->gendisk,
2375 bdev_get_integrity(reference->bdev));
2376
NeilBrown9d487392016-11-02 14:16:49 +11002377 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
Kent Overstreetafeee512018-05-20 18:25:52 -04002378 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
NeilBrown9d487392016-11-02 14:16:49 +11002379 pr_err("md: failed to create integrity pool for %s\n",
Martin K. Petersena91a2782011-03-17 11:11:05 +01002380 mdname(mddev));
2381 return -EINVAL;
2382 }
Andre Nollac5e7112009-08-03 10:59:47 +10002383 return 0;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002384}
Andre Nollac5e7112009-08-03 10:59:47 +10002385EXPORT_SYMBOL(md_integrity_register);
2386
Dan Williams1501efa2016-01-13 16:00:07 -08002387/*
2388 * Attempt to add an rdev, but only if it is consistent with the current
2389 * integrity profile
2390 */
2391int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
Andre Nollac5e7112009-08-03 10:59:47 +10002392{
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002393 struct blk_integrity *bi_mddev;
Dan Williams1501efa2016-01-13 16:00:07 -08002394 char name[BDEVNAME_SIZE];
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002395
2396 if (!mddev->gendisk)
Dan Williams1501efa2016-01-13 16:00:07 -08002397 return 0;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002398
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002399 bi_mddev = blk_get_integrity(mddev->gendisk);
Andre Nollac5e7112009-08-03 10:59:47 +10002400
2401 if (!bi_mddev) /* nothing to do */
Dan Williams1501efa2016-01-13 16:00:07 -08002402 return 0;
2403
2404 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11002405 pr_err("%s: incompatible integrity profile for %s\n",
2406 mdname(mddev), bdevname(rdev->bdev, name));
Dan Williams1501efa2016-01-13 16:00:07 -08002407 return -ENXIO;
2408 }
2409
2410 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002411}
2412EXPORT_SYMBOL(md_integrity_add_rdev);
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002413
NeilBrownf72ffdd2014-09-30 14:23:59 +10002414static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415{
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002416 char b[BDEVNAME_SIZE];
NeilBrown5e55e2f2007-03-26 21:32:14 -08002417 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418
Dan Williams11e2ede2008-04-30 00:52:32 -07002419 /* prevent duplicates */
2420 if (find_rdev(mddev, rdev->bdev->bd_dev))
2421 return -EEXIST;
2422
NeilBrown97b20ef2017-04-13 08:53:48 +10002423 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2424 mddev->pers)
2425 return -EROFS;
2426
Andre Nolldd8ac332009-03-31 14:33:13 +11002427 /* make sure rdev->sectors exceeds mddev->dev_sectors */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002428 if (!test_bit(Journal, &rdev->flags) &&
2429 rdev->sectors &&
2430 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
NeilBrowna778b732007-05-23 13:58:10 -07002431 if (mddev->pers) {
2432 /* Cannot change size, so fail
2433 * If mddev->level <= 0, then we don't care
2434 * about aligning sizes (e.g. linear)
2435 */
2436 if (mddev->level > 0)
2437 return -ENOSPC;
2438 } else
Andre Nolldd8ac332009-03-31 14:33:13 +11002439 mddev->dev_sectors = rdev->sectors;
NeilBrown2bf071b2006-01-06 00:20:55 -08002440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
2442 /* Verify rdev->desc_nr is unique.
2443 * If it is -1, assign a free number, else
2444	 * check that the number is not in use
2445 */
NeilBrown4878e9e2014-09-25 17:00:11 +10002446 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 if (rdev->desc_nr < 0) {
2448 int choice = 0;
NeilBrown4878e9e2014-09-25 17:00:11 +10002449 if (mddev->pers)
2450 choice = mddev->raid_disks;
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002451 while (md_find_rdev_nr_rcu(mddev, choice))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 choice++;
2453 rdev->desc_nr = choice;
2454 } else {
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002455 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
NeilBrown4878e9e2014-09-25 17:00:11 +10002456 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 return -EBUSY;
NeilBrown4878e9e2014-09-25 17:00:11 +10002458 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 }
NeilBrown4878e9e2014-09-25 17:00:11 +10002460 rcu_read_unlock();
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002461 if (!test_bit(Journal, &rdev->flags) &&
2462 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11002463 pr_warn("md: %s: array is limited to %d devices\n",
2464 mdname(mddev), mddev->max_disks);
NeilBrownde01dfa2009-02-06 18:02:46 +11002465 return -EBUSY;
2466 }
NeilBrown19133a42005-11-08 21:39:35 -08002467 bdevname(rdev->bdev,b);
Rasmus Villemoes90a9bef2015-06-25 15:02:36 -07002468 strreplace(b, '/', '!');
Greg Kroah-Hartman649316b2007-12-17 23:05:35 -07002469
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 rdev->mddev = mddev;
NeilBrown9d487392016-11-02 14:16:49 +11002471 pr_debug("md: bind<%s>\n", b);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002472
Guoqing Jiang963c5552019-06-14 17:10:36 +08002473 if (mddev->raid_disks)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002474 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang963c5552019-06-14 17:10:36 +08002475
Greg Kroah-Hartmanb2d6db52007-12-17 23:05:35 -07002476 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
NeilBrown5e55e2f2007-03-26 21:32:14 -08002477 goto fail;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002478
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09002479 /* failure here is OK */
Christoph Hellwig8d652692020-11-17 08:18:55 +01002480 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
NeilBrown00bcb4a2010-06-01 19:37:23 +10002481 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07002482 rdev->sysfs_unack_badblocks =
2483 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2484 rdev->sysfs_badblocks =
2485 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
NeilBrown3c0ee632008-10-21 13:25:28 +11002486
NeilBrown4b809912008-07-21 17:05:25 +10002487 list_add_rcu(&rdev->same_set, &mddev->disks);
Tejun Heoe09b4572010-11-13 11:55:17 +01002488 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
NeilBrown4044ba52009-01-09 08:31:11 +11002489
2490 /* May as well allow recovery to be retried once */
NeilBrown53890422011-07-27 11:00:36 +10002491 mddev->recovery_disabled++;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002492
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493 return 0;
NeilBrown5e55e2f2007-03-26 21:32:14 -08002494
2495 fail:
NeilBrown9d487392016-11-02 14:16:49 +11002496 pr_warn("md: failed to register dev-%s for %s\n",
2497 b, mdname(mddev));
NeilBrown5e55e2f2007-03-26 21:32:14 -08002498 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499}
2500
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002501static void rdev_delayed_delete(struct work_struct *ws)
NeilBrown5792a282007-04-04 19:08:18 -07002502{
NeilBrown3cb03002011-10-11 16:45:26 +11002503 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
NeilBrown5792a282007-04-04 19:08:18 -07002504 kobject_del(&rdev->kobj);
NeilBrown177a99b2008-02-06 01:39:56 -08002505 kobject_put(&rdev->kobj);
NeilBrown5792a282007-04-04 19:08:18 -07002506}
2507
NeilBrownf72ffdd2014-09-30 14:23:59 +10002508static void unbind_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509{
2510 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002511
Tejun Heo49731ba2011-01-14 18:43:57 +01002512 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
NeilBrown4b809912008-07-21 17:05:25 +10002513 list_del_rcu(&rdev->same_set);
NeilBrown9d487392016-11-02 14:16:49 +11002514 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01002515 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 rdev->mddev = NULL;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002517 sysfs_remove_link(&rdev->kobj, "block");
NeilBrown3c0ee632008-10-21 13:25:28 +11002518 sysfs_put(rdev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07002519 sysfs_put(rdev->sysfs_unack_badblocks);
2520 sysfs_put(rdev->sysfs_badblocks);
NeilBrown3c0ee632008-10-21 13:25:28 +11002521 rdev->sysfs_state = NULL;
Junxiao Bie1a86db2020-07-14 16:10:26 -07002522 rdev->sysfs_unack_badblocks = NULL;
2523 rdev->sysfs_badblocks = NULL;
NeilBrown2230dfe2011-07-28 11:31:46 +10002524 rdev->badblocks.count = 0;
NeilBrown5792a282007-04-04 19:08:18 -07002525 /* We need to delay this, otherwise we can deadlock when
NeilBrown4b809912008-07-21 17:05:25 +10002526	 * writing 'remove' to "dev/state". We also need
2527 * to delay it due to rcu usage.
NeilBrown5792a282007-04-04 19:08:18 -07002528 */
NeilBrown4b809912008-07-21 17:05:25 +10002529 synchronize_rcu();
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002530 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
NeilBrown177a99b2008-02-06 01:39:56 -08002531 kobject_get(&rdev->kobj);
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002532 queue_work(md_rdev_misc_wq, &rdev->del_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533}
2534
2535/*
2536 * prevent the device from being mounted, repartitioned or
2537 * otherwise reused by a RAID array (or any other kernel
2538 * subsystem), by bd_claiming the device.
2539 */
NeilBrown3cb03002011-10-11 16:45:26 +11002540static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541{
2542 int err = 0;
2543 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544
Tejun Heod4d77622010-11-13 11:55:18 +01002545 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
NeilBrown3cb03002011-10-11 16:45:26 +11002546 shared ? (struct md_rdev *)lock_rdev : rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 if (IS_ERR(bdev)) {
Christoph Hellwigea3edd42020-03-24 08:25:11 +01002548 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2549 MAJOR(dev), MINOR(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 return PTR_ERR(bdev);
2551 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 rdev->bdev = bdev;
2553 return err;
2554}
2555
NeilBrown3cb03002011-10-11 16:45:26 +11002556static void unlock_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557{
2558 struct block_device *bdev = rdev->bdev;
2559 rdev->bdev = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01002560 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561}
2562
2563void md_autodetect_dev(dev_t dev);
2564
NeilBrownf72ffdd2014-09-30 14:23:59 +10002565static void export_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566{
2567 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002568
NeilBrown9d487392016-11-02 14:16:49 +11002569 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
NeilBrown545c8792012-05-22 13:54:30 +10002570 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571#ifndef MODULE
NeilBrownd0fae182008-03-04 14:29:31 -08002572 if (test_bit(AutoDetected, &rdev->flags))
2573 md_autodetect_dev(rdev->bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574#endif
2575 unlock_rdev(rdev);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002576 kobject_put(&rdev->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577}
2578
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002579void md_kick_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580{
2581 unbind_rdev_from_array(rdev);
2582 export_rdev(rdev);
2583}
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002584EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585
NeilBrownfd01b882011-10-11 16:47:53 +11002586static void export_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587{
NeilBrown0638bb02014-09-25 17:43:47 +10002588 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589
NeilBrown0638bb02014-09-25 17:43:47 +10002590 while (!list_empty(&mddev->disks)) {
2591 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2592 same_set);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002593 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595 mddev->raid_disks = 0;
2596 mddev->major_version = 0;
2597}
2598
NeilBrown6497709b2017-03-15 14:05:14 +11002599static bool set_in_sync(struct mddev *mddev)
2600{
Shaohua Liefa4b772017-10-18 22:08:13 -07002601 lockdep_assert_held(&mddev->lock);
NeilBrown4ad23a972017-03-15 14:05:14 +11002602 if (!mddev->in_sync) {
2603 mddev->sync_checkers++;
2604 spin_unlock(&mddev->lock);
2605 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2606 spin_lock(&mddev->lock);
2607 if (!mddev->in_sync &&
2608 percpu_ref_is_zero(&mddev->writes_pending)) {
NeilBrown6497709b2017-03-15 14:05:14 +11002609 mddev->in_sync = 1;
NeilBrown4ad23a972017-03-15 14:05:14 +11002610 /*
2611 * Ensure ->in_sync is visible before we clear
2612 * ->sync_checkers.
2613 */
NeilBrown55cc39f2017-03-15 14:05:14 +11002614 smp_mb();
NeilBrown6497709b2017-03-15 14:05:14 +11002615 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2616 sysfs_notify_dirent_safe(mddev->sysfs_state);
2617 }
NeilBrown4ad23a972017-03-15 14:05:14 +11002618 if (--mddev->sync_checkers == 0)
2619 percpu_ref_switch_to_percpu(&mddev->writes_pending);
NeilBrown6497709b2017-03-15 14:05:14 +11002620 }
2621 if (mddev->safemode == 1)
2622 mddev->safemode = 0;
2623 return mddev->in_sync;
2624}
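/*
 * Illustrative note (not in the original source): this is the standard
 * percpu_ref pattern for reading an exact count.  The ref is switched
 * to atomic mode so percpu_ref_is_zero() is reliable, sync_checkers
 * keeps it atomic while any checker is still inside the window, and
 * the last one out switches back to per-cpu mode so the write fast
 * path stays cheap.
 */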
2625
NeilBrownf72ffdd2014-09-30 14:23:59 +10002626static void sync_sbs(struct mddev *mddev, int nospares)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627{
NeilBrown42543762006-06-26 00:27:57 -07002628 /* Update each superblock (in-memory image), but
2629 * if we are allowed to, skip spares which already
2630 * have the right event counter, or have one earlier
2631 * (which would mean they aren't being marked as dirty
2632 * with the rest of the array)
2633 */
NeilBrown3cb03002011-10-11 16:45:26 +11002634 struct md_rdev *rdev;
NeilBrowndafb20f2012-03-19 12:46:39 +11002635 rdev_for_each(rdev, mddev) {
NeilBrown42543762006-06-26 00:27:57 -07002636 if (rdev->sb_events == mddev->events ||
2637 (nospares &&
2638 rdev->raid_disk < 0 &&
NeilBrown42543762006-06-26 00:27:57 -07002639 rdev->sb_events+1 == mddev->events)) {
2640 /* Don't update this superblock */
2641 rdev->sb_loaded = 2;
2642 } else {
Jonathan Brassow076f9682011-06-07 17:51:30 -05002643 sync_super(mddev, rdev);
NeilBrown42543762006-06-26 00:27:57 -07002644 rdev->sb_loaded = 1;
2645 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 }
2647}
2648
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002649static bool does_sb_need_changing(struct mddev *mddev)
2650{
2651 struct md_rdev *rdev;
2652 struct mdp_superblock_1 *sb;
2653 int role;
2654
2655 /* Find a good rdev */
2656 rdev_for_each(rdev, mddev)
2657 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2658 break;
2659
2660 /* No good device found. */
2661 if (!rdev)
2662 return false;
2663
2664 sb = page_address(rdev->sb_page);
2665	/* Check if a device has become faulty or a spare has become active */
2666 rdev_for_each(rdev, mddev) {
2667 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2668 /* Device activated? */
2669		if (role == 0xffff && rdev->raid_disk >= 0 &&
2670 !test_bit(Faulty, &rdev->flags))
2671 return true;
2672 /* Device turned faulty? */
2673 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2674 return true;
2675 }
2676
2677 /* Check if any mddev parameters have changed */
2678 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2679 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
Jason Yan13459212017-03-10 11:49:12 +08002680 (mddev->layout != le32_to_cpu(sb->layout)) ||
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002681 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2682 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2683 return true;
2684
2685 return false;
2686}
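/*
 * Note on the magic role values above (added for clarity): in md_p.h
 * 0xffff is MD_DISK_ROLE_SPARE, 0xfffe is MD_DISK_ROLE_FAULTY and
 * 0xfffd is MD_DISK_ROLE_JOURNAL, so "role < 0xfffd" selects slots
 * holding an ordinary data role.
 */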
2687
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002688void md_update_sb(struct mddev *mddev, int force_change)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689{
NeilBrown3cb03002011-10-11 16:45:26 +11002690 struct md_rdev *rdev;
NeilBrown06d91a52005-06-21 17:17:12 -07002691 int sync_req;
NeilBrown42543762006-06-26 00:27:57 -07002692 int nospares = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002693 int any_badblocks_changed = 0;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002694 int ret = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695
NeilBrownd87f0642013-04-24 11:42:40 +10002696 if (mddev->ro) {
2697 if (force_change)
Shaohua Li29530792016-12-08 15:48:19 -08002698 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownd87f0642013-04-24 11:42:40 +10002699 return;
2700 }
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002701
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002702repeat:
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002703 if (mddev_is_clustered(mddev)) {
Shaohua Li29530792016-12-08 15:48:19 -08002704 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002705 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002706 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04002707 nospares = 1;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002708 ret = md_cluster_ops->metadata_update_start(mddev);
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002709		/* Has someone else updated the sb? */
2710 if (!does_sb_need_changing(mddev)) {
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002711 if (ret == 0)
2712 md_cluster_ops->metadata_update_cancel(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08002713 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2714 BIT(MD_SB_CHANGE_DEVS) |
2715 BIT(MD_SB_CHANGE_CLEAN));
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002716 return;
2717 }
2718 }
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002719
NeilBrowndb0505d2017-10-17 16:18:36 +11002720 /*
2721	 * First make sure individual recovery_offsets are correct.
2722	 * curr_resync_completed can only be used during recovery.
2723	 * During reshape/resync it might use array addresses rather
2724	 * than device addresses.
2725 */
NeilBrowndafb20f2012-03-19 12:46:39 +11002726 rdev_for_each(rdev, mddev) {
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002727 if (rdev->raid_disk >= 0 &&
2728 mddev->delta_disks >= 0 &&
NeilBrowndb0505d2017-10-17 16:18:36 +11002729 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2730 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2731 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002732 !test_bit(Journal, &rdev->flags) &&
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002733 !test_bit(In_sync, &rdev->flags) &&
2734 mddev->curr_resync_completed > rdev->recovery_offset)
2735 rdev->recovery_offset = mddev->curr_resync_completed;
2736
NeilBrownf72ffdd2014-09-30 14:23:59 +10002737 }
Dan Williamsbd52b742010-08-30 17:33:33 +10002738 if (!mddev->persistent) {
Shaohua Li29530792016-12-08 15:48:19 -08002739 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2740 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownde393cd2011-07-28 11:31:48 +10002741 if (!mddev->external) {
Shaohua Li29530792016-12-08 15:48:19 -08002742 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrowndafb20f2012-03-19 12:46:39 +11002743 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002744 if (rdev->badblocks.changed) {
NeilBrownd0962932012-03-19 12:46:41 +11002745 rdev->badblocks.changed = 0;
Vishal Vermafc974ee2015-12-24 19:20:34 -07002746 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002747 md_error(mddev, rdev);
2748 }
2749 clear_bit(Blocked, &rdev->flags);
2750 clear_bit(BlockedBadBlocks, &rdev->flags);
2751 wake_up(&rdev->blocked_wait);
2752 }
2753 }
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002754 wake_up(&mddev->sb_wait);
2755 return;
2756 }
2757
NeilBrown85572d72014-12-15 12:56:56 +11002758 spin_lock(&mddev->lock);
NeilBrown84692192006-08-27 01:23:49 -07002759
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11002760 mddev->utime = ktime_get_real_seconds();
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002761
Shaohua Li29530792016-12-08 15:48:19 -08002762 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002763 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002764 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002765		/* just a clean <-> dirty transition, possibly leave spares alone,
2766		 * though if 'events' isn't the right even/odd, we will have to do
2767		 * spares after all
2768		 */
2769 nospares = 1;
2770 if (force_change)
2771 nospares = 0;
2772 if (mddev->degraded)
NeilBrown84692192006-08-27 01:23:49 -07002773 /* If the array is degraded, then skipping spares is both
2774 * dangerous and fairly pointless.
2775 * Dangerous because a device that was removed from the array
2776		 * might have an event_count that still looks up-to-date,
2777 * so it can be re-added without a resync.
2778 * Pointless because if there are any spares to skip,
2779 * then a recovery will happen and soon that array won't
2780 * be degraded any more and the spare can go back to sleep then.
2781 */
NeilBrown850b2b422006-10-03 01:15:46 -07002782 nospares = 0;
NeilBrown84692192006-08-27 01:23:49 -07002783
NeilBrown06d91a52005-06-21 17:17:12 -07002784 sync_req = mddev->in_sync;
NeilBrown42543762006-06-26 00:27:57 -07002785
2786 /* If this is just a dirty<->clean transition, and the array is clean
2787 * and 'events' is odd, we can roll back to the previous clean state */
NeilBrown850b2b422006-10-03 01:15:46 -07002788 if (nospares
NeilBrown42543762006-06-26 00:27:57 -07002789 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
NeilBrowna8707c02010-05-18 09:28:43 +10002790 && mddev->can_decrease_events
2791 && mddev->events != 1) {
NeilBrown42543762006-06-26 00:27:57 -07002792 mddev->events--;
NeilBrowna8707c02010-05-18 09:28:43 +10002793 mddev->can_decrease_events = 0;
2794 } else {
NeilBrown42543762006-06-26 00:27:57 -07002795 /* otherwise we have to go forward and ... */
2796		mddev->events++;
NeilBrowna8707c02010-05-18 09:28:43 +10002797 mddev->can_decrease_events = nospares;
NeilBrown42543762006-06-26 00:27:57 -07002798 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799
NeilBrown403df472014-09-30 15:52:29 +10002800 /*
2801 * This 64-bit counter should never wrap.
2802 * Either we are in around ~1 trillion A.C., assuming
2803 * 1 reboot per second, or we have a bug...
2804 */
2805 WARN_ON(mddev->events == 0);
NeilBrown2699b672011-07-28 11:31:47 +10002806
NeilBrowndafb20f2012-03-19 12:46:39 +11002807 rdev_for_each(rdev, mddev) {
NeilBrown2699b672011-07-28 11:31:47 +10002808 if (rdev->badblocks.changed)
2809 any_badblocks_changed++;
NeilBrownde393cd2011-07-28 11:31:48 +10002810 if (test_bit(Faulty, &rdev->flags))
2811 set_bit(FaultRecorded, &rdev->flags);
2812 }
NeilBrown2699b672011-07-28 11:31:47 +10002813
NeilBrowne6910632008-02-06 01:39:51 -08002814 sync_sbs(mddev, nospares);
NeilBrown85572d72014-12-15 12:56:56 +11002815 spin_unlock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
NeilBrown36a4e1f2011-10-07 14:23:17 +11002817 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2818 mdname(mddev), mddev->in_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819
Shaohua Li504634f2016-11-18 09:44:08 -08002820 if (mddev->queue)
2821 blk_add_trace_msg(mddev->queue, "md md_update_sb");
NeilBrown46533ff2016-11-18 16:16:11 +11002822rewrite:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002823 md_bitmap_update_sb(mddev->bitmap);
NeilBrowndafb20f2012-03-19 12:46:39 +11002824 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 char b[BDEVNAME_SIZE];
NeilBrown36a4e1f2011-10-07 14:23:17 +11002826
NeilBrown42543762006-06-26 00:27:57 -07002827 if (rdev->sb_loaded != 1)
2828 continue; /* no noise on spare devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829
NeilBrownf4667222013-12-09 12:04:56 +11002830 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown7bfa19f2005-06-21 17:17:28 -07002831 md_super_write(mddev,rdev,
Andre Noll0f420352008-07-11 22:02:23 +10002832 rdev->sb_start, rdev->sb_size,
NeilBrown7bfa19f2005-06-21 17:17:28 -07002833 rdev->sb_page);
NeilBrown36a4e1f2011-10-07 14:23:17 +11002834 pr_debug("md: (write) %s's sb offset: %llu\n",
2835 bdevname(rdev->bdev, b),
2836 (unsigned long long)rdev->sb_start);
NeilBrown42543762006-06-26 00:27:57 -07002837 rdev->sb_events = mddev->events;
NeilBrown2699b672011-07-28 11:31:47 +10002838 if (rdev->badblocks.size) {
2839 md_super_write(mddev, rdev,
2840 rdev->badblocks.sector,
2841 rdev->badblocks.size << 9,
2842 rdev->bb_page);
2843 rdev->badblocks.size = 0;
2844 }
NeilBrown7bfa19f2005-06-21 17:17:28 -07002845
NeilBrownf4667222013-12-09 12:04:56 +11002846 } else
NeilBrown36a4e1f2011-10-07 14:23:17 +11002847 pr_debug("md: %s (skipping faulty)\n",
2848 bdevname(rdev->bdev, b));
Andrei Warkentind70ed2e2011-10-18 12:16:48 +11002849
NeilBrown7bfa19f2005-06-21 17:17:28 -07002850 if (mddev->level == LEVEL_MULTIPATH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851 /* only need to write one superblock... */
2852 break;
2853 }
NeilBrown46533ff2016-11-18 16:16:11 +11002854 if (md_super_wait(mddev) < 0)
2855 goto rewrite;
Shaohua Li29530792016-12-08 15:48:19 -08002856 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
NeilBrown7bfa19f2005-06-21 17:17:28 -07002857
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002858 if (mddev_is_clustered(mddev) && ret == 0)
2859 md_cluster_ops->metadata_update_finish(mddev);
2860
NeilBrown850b2b422006-10-03 01:15:46 -07002861 if (mddev->in_sync != sync_req ||
Shaohua Li29530792016-12-08 15:48:19 -08002862 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2863 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
NeilBrown06d91a52005-06-21 17:17:12 -07002864 /* have to write it out again */
NeilBrown06d91a52005-06-21 17:17:12 -07002865 goto repeat;
NeilBrown3d310eb2005-06-21 17:17:26 -07002866 wake_up(&mddev->sb_wait);
NeilBrownacb180b2009-04-14 16:28:34 +10002867 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
Junxiao Bie1a86db2020-07-14 16:10:26 -07002868 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown06d91a52005-06-21 17:17:12 -07002869
NeilBrowndafb20f2012-03-19 12:46:39 +11002870 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002871 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2872 clear_bit(Blocked, &rdev->flags);
2873
2874 if (any_badblocks_changed)
Vishal Vermafc974ee2015-12-24 19:20:34 -07002875 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002876 clear_bit(BlockedBadBlocks, &rdev->flags);
2877 wake_up(&rdev->blocked_wait);
2878 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879}
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002880EXPORT_SYMBOL(md_update_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002882static int add_bound_rdev(struct md_rdev *rdev)
2883{
2884 struct mddev *mddev = rdev->mddev;
2885 int err = 0;
Shaohua Li87d4d912016-01-06 14:37:14 -08002886 bool add_journal = test_bit(Journal, &rdev->flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002887
Shaohua Li87d4d912016-01-06 14:37:14 -08002888 if (!mddev->pers->hot_remove_disk || add_journal) {
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002889 /* If there is hot_add_disk but no hot_remove_disk
2890 * then added disks for geometry changes,
2891 * and should be added immediately.
2892 */
2893 super_types[mddev->major_version].
2894 validate_super(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002895 if (add_journal)
2896 mddev_suspend(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002897 err = mddev->pers->hot_add_disk(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002898 if (add_journal)
2899 mddev_resume(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002900 if (err) {
Guoqing Jiangdb767672016-06-02 23:32:05 -04002901 md_kick_rdev_from_array(rdev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002902 return err;
2903 }
2904 }
2905 sysfs_notify_dirent_safe(rdev->sysfs_state);
2906
Shaohua Li29530792016-12-08 15:48:19 -08002907 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002908 if (mddev->degraded)
2909 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2910 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2911 md_new_event(mddev);
2912 md_wakeup_thread(mddev->thread);
2913 return 0;
2914}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915
Andre Noll7f6ce762008-03-23 18:34:54 +01002916/* words written to sysfs files may, or may not, be \n terminated.
NeilBrownbce74da2006-01-06 00:20:41 -08002917 * We want to accept either case. For this we use cmd_match.
2918 */
2919static int cmd_match(const char *cmd, const char *str)
2920{
2921 /* See if cmd, written into a sysfs file, matches
2922 * str. They must either be the same, or cmd can
2923 * have a trailing newline
2924 */
2925 while (*cmd && *str && *cmd == *str) {
2926 cmd++;
2927 str++;
2928 }
2929 if (*cmd == '\n')
2930 cmd++;
2931 if (*str || *cmd)
2932 return 0;
2933 return 1;
2934}
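/*
 * Example behaviour (illustrative only):
 *
 *	cmd_match("remove\n", "remove") -> 1  (trailing newline accepted)
 *	cmd_match("remove",   "remove") -> 1
 *	cmd_match("removed",  "remove") -> 0  (extra characters reject)
 *	cmd_match("remo",     "remove") -> 0  (a bare prefix is no match)
 */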
2935
NeilBrown86e6ffd2005-11-08 21:39:24 -08002936struct rdev_sysfs_entry {
2937 struct attribute attr;
NeilBrown3cb03002011-10-11 16:45:26 +11002938 ssize_t (*show)(struct md_rdev *, char *);
2939 ssize_t (*store)(struct md_rdev *, const char *, size_t);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002940};
2941
2942static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002943state_show(struct md_rdev *rdev, char *page)
NeilBrown86e6ffd2005-11-08 21:39:24 -08002944{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002945 char *sep = ",";
NeilBrown20a49ff2008-02-06 01:39:57 -08002946 size_t len = 0;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002947 unsigned long flags = READ_ONCE(rdev->flags);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002948
NeilBrown758bfc82014-12-15 12:56:59 +11002949 if (test_bit(Faulty, &flags) ||
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02002950 (!test_bit(ExternalBbl, &flags) &&
2951 rdev->badblocks.unacked_exist))
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002952 len += sprintf(page+len, "faulty%s", sep);
2953 if (test_bit(In_sync, &flags))
2954 len += sprintf(page+len, "in_sync%s", sep);
2955 if (test_bit(Journal, &flags))
2956 len += sprintf(page+len, "journal%s", sep);
2957 if (test_bit(WriteMostly, &flags))
2958 len += sprintf(page+len, "write_mostly%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002959 if (test_bit(Blocked, &flags) ||
NeilBrown52c64152011-12-08 16:22:48 +11002960 (rdev->badblocks.unacked_exist
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002961 && !test_bit(Faulty, &flags)))
2962 len += sprintf(page+len, "blocked%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002963 if (!test_bit(Faulty, &flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002964 !test_bit(Journal, &flags) &&
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002965 !test_bit(In_sync, &flags))
2966 len += sprintf(page+len, "spare%s", sep);
2967 if (test_bit(WriteErrorSeen, &flags))
2968 len += sprintf(page+len, "write_error%s", sep);
2969 if (test_bit(WantReplacement, &flags))
2970 len += sprintf(page+len, "want_replacement%s", sep);
2971 if (test_bit(Replacement, &flags))
2972 len += sprintf(page+len, "replacement%s", sep);
2973 if (test_bit(ExternalBbl, &flags))
2974 len += sprintf(page+len, "external_bbl%s", sep);
NeilBrown688834e2016-11-18 16:16:11 +11002975 if (test_bit(FailFast, &flags))
2976 len += sprintf(page+len, "failfast%s", sep);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002977
2978 if (len)
2979 len -= strlen(sep);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002980
NeilBrown86e6ffd2005-11-08 21:39:24 -08002981 return len+sprintf(page+len, "\n");
2982}
2983
NeilBrown45dc2de2006-06-26 00:27:58 -07002984static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002985state_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown45dc2de2006-06-26 00:27:58 -07002986{
2987 /* can write
NeilBrownde393cd2011-07-28 11:31:48 +10002988 * faulty - simulates an error
NeilBrown45dc2de2006-06-26 00:27:58 -07002989 * remove - disconnects the device
NeilBrownf6556752006-06-26 00:28:01 -07002990 * writemostly - sets write_mostly
2991 * -writemostly - clears write_mostly
NeilBrownde393cd2011-07-28 11:31:48 +10002992	 * blocked - sets the Blocked flag
2993	 * -blocked - clears the Blocked flag and possibly simulates an error
NeilBrown6d56e272009-04-14 12:01:57 +10002994	 * insync - sets In_sync provided the device isn't active
NeilBrownf4667222013-12-09 12:04:56 +11002995	 * -insync - clears In_sync for a device with a slot assigned,
2996	 * so that it gets rebuilt based on the bitmap
NeilBrownd7a9d442011-07-28 11:31:48 +10002997 * write_error - sets WriteErrorSeen
2998 * -write_error - clears WriteErrorSeen
NeilBrown688834e2016-11-18 16:16:11 +11002999 * {,-}failfast - set/clear FailFast
NeilBrown45dc2de2006-06-26 00:27:58 -07003000 */
3001 int err = -EINVAL;
3002 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3003 md_error(rdev->mddev, rdev);
NeilBrown5ef56c82011-08-25 14:42:51 +10003004 if (test_bit(Faulty, &rdev->flags))
3005 err = 0;
3006 else
3007 err = -EBUSY;
NeilBrown45dc2de2006-06-26 00:27:58 -07003008 } else if (cmd_match(buf, "remove")) {
Shaohua Li5d881782016-07-28 09:06:34 -07003009 if (rdev->mddev->pers) {
3010 clear_bit(Blocked, &rdev->flags);
3011 remove_and_add_spares(rdev->mddev, rdev);
3012 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003013 if (rdev->raid_disk >= 0)
3014 err = -EBUSY;
3015 else {
NeilBrownfd01b882011-10-11 16:47:53 +11003016 struct mddev *mddev = rdev->mddev;
NeilBrown45dc2de2006-06-26 00:27:58 -07003017 err = 0;
Guoqing Jianga9720902015-10-12 17:21:27 +08003018 if (mddev_is_clustered(mddev))
3019 err = md_cluster_ops->remove_disk(mddev, rdev);
3020
3021 if (err == 0) {
3022 md_kick_rdev_from_array(rdev);
NeilBrown060b0682016-11-04 16:46:03 +11003023 if (mddev->pers) {
Shaohua Li29530792016-12-08 15:48:19 -08003024 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11003025 md_wakeup_thread(mddev->thread);
3026 }
Guoqing Jianga9720902015-10-12 17:21:27 +08003027 md_new_event(mddev);
3028 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003029 }
NeilBrownf6556752006-06-26 00:28:01 -07003030 } else if (cmd_match(buf, "writemostly")) {
3031 set_bit(WriteMostly, &rdev->flags);
Guoqing Jiang404659c2019-12-23 10:48:53 +01003032 mddev_create_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003033 err = 0;
3034 } else if (cmd_match(buf, "-writemostly")) {
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01003035 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003036 clear_bit(WriteMostly, &rdev->flags);
3037 err = 0;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003038 } else if (cmd_match(buf, "blocked")) {
3039 set_bit(Blocked, &rdev->flags);
3040 err = 0;
3041 } else if (cmd_match(buf, "-blocked")) {
NeilBrownde393cd2011-07-28 11:31:48 +10003042 if (!test_bit(Faulty, &rdev->flags) &&
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02003043 !test_bit(ExternalBbl, &rdev->flags) &&
NeilBrown7da64a02011-08-30 16:20:17 +10003044 rdev->badblocks.unacked_exist) {
NeilBrownde393cd2011-07-28 11:31:48 +10003045 /* metadata handler doesn't understand badblocks,
3046 * so we need to fail the device
3047 */
3048 md_error(rdev->mddev, rdev);
3049 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003050 clear_bit(Blocked, &rdev->flags);
NeilBrownde393cd2011-07-28 11:31:48 +10003051 clear_bit(BlockedBadBlocks, &rdev->flags);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003052 wake_up(&rdev->blocked_wait);
3053 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3054 md_wakeup_thread(rdev->mddev->thread);
3055
3056 err = 0;
NeilBrown6d56e272009-04-14 12:01:57 +10003057 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3058 set_bit(In_sync, &rdev->flags);
3059 err = 0;
NeilBrown688834e2016-11-18 16:16:11 +11003060 } else if (cmd_match(buf, "failfast")) {
3061 set_bit(FailFast, &rdev->flags);
3062 err = 0;
3063 } else if (cmd_match(buf, "-failfast")) {
3064 clear_bit(FailFast, &rdev->flags);
3065 err = 0;
Shaohua Lif2076e72015-10-08 21:54:12 -07003066 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3067 !test_bit(Journal, &rdev->flags)) {
NeilBrowne1960f82014-09-30 15:24:25 +10003068 if (rdev->mddev->pers == NULL) {
3069 clear_bit(In_sync, &rdev->flags);
3070 rdev->saved_raid_disk = rdev->raid_disk;
3071 rdev->raid_disk = -1;
3072 err = 0;
3073 }
NeilBrownd7a9d442011-07-28 11:31:48 +10003074 } else if (cmd_match(buf, "write_error")) {
3075 set_bit(WriteErrorSeen, &rdev->flags);
3076 err = 0;
3077 } else if (cmd_match(buf, "-write_error")) {
3078 clear_bit(WriteErrorSeen, &rdev->flags);
3079 err = 0;
NeilBrown2d78f8c2011-12-23 10:17:51 +11003080 } else if (cmd_match(buf, "want_replacement")) {
3081 /* Any non-spare device that is not a replacement can
3082 * become want_replacement at any time, but we then need to
3083 * check if recovery is needed.
3084 */
3085 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07003086 !test_bit(Journal, &rdev->flags) &&
NeilBrown2d78f8c2011-12-23 10:17:51 +11003087 !test_bit(Replacement, &rdev->flags))
3088 set_bit(WantReplacement, &rdev->flags);
3089 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3090 md_wakeup_thread(rdev->mddev->thread);
3091 err = 0;
3092 } else if (cmd_match(buf, "-want_replacement")) {
3093 /* Clearing 'want_replacement' is always allowed.
3094 * Once replacements starts it is too late though.
3095 */
3096 err = 0;
3097 clear_bit(WantReplacement, &rdev->flags);
3098 } else if (cmd_match(buf, "replacement")) {
3099 /* Can only set a device as a replacement when array has not
3100 * yet been started. Once running, replacement is automatic
3101 * from spares, or by assigning 'slot'.
3102 */
3103 if (rdev->mddev->pers)
3104 err = -EBUSY;
3105 else {
3106 set_bit(Replacement, &rdev->flags);
3107 err = 0;
3108 }
3109 } else if (cmd_match(buf, "-replacement")) {
3110 /* Similarly, can only clear Replacement before start */
3111 if (rdev->mddev->pers)
3112 err = -EBUSY;
3113 else {
3114 clear_bit(Replacement, &rdev->flags);
3115 err = 0;
3116 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003117 } else if (cmd_match(buf, "re-add")) {
Yufen Yuee37e622019-04-02 14:22:14 +08003118 if (!rdev->mddev->pers)
3119 err = -EINVAL;
3120 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3121 rdev->saved_raid_disk >= 0) {
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05003122 /* clear_bit is performed _after_ all the devices
3123 * have their local Faulty bit cleared. If any writes
3124 * happen in the meantime in the local node, they
3125 * will land in the local bitmap, which will be synced
3126 * by this node eventually
3127 */
3128 if (!mddev_is_clustered(rdev->mddev) ||
3129 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3130 clear_bit(Faulty, &rdev->flags);
3131 err = add_bound_rdev(rdev);
3132 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003133 } else
3134 err = -EBUSY;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02003135 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3136 set_bit(ExternalBbl, &rdev->flags);
3137 rdev->badblocks.shift = 0;
3138 err = 0;
3139 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3140 clear_bit(ExternalBbl, &rdev->flags);
3141 err = 0;
NeilBrown45dc2de2006-06-26 00:27:58 -07003142 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10003143 if (!err)
3144 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrown45dc2de2006-06-26 00:27:58 -07003145 return err ? err : len;
3146}
NeilBrown80ca3a42006-07-10 04:44:18 -07003147static struct rdev_sysfs_entry rdev_state =
NeilBrown750f1992014-09-30 08:53:05 +10003148__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003149
3150static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003151errors_show(struct md_rdev *rdev, char *page)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003152{
3153 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3154}
3155
3156static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003157errors_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003158{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003159 unsigned int n;
3160 int rv;
3161
3162 rv = kstrtouint(buf, 10, &n);
3163 if (rv < 0)
3164 return rv;
3165 atomic_set(&rdev->corrected_errors, n);
3166 return len;
NeilBrown4dbcdc72006-01-06 00:20:52 -08003167}
3168static struct rdev_sysfs_entry rdev_errors =
NeilBrown80ca3a42006-07-10 04:44:18 -07003169__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
NeilBrown4dbcdc72006-01-06 00:20:52 -08003170
NeilBrown014236d2006-01-06 00:20:55 -08003171static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003172slot_show(struct md_rdev *rdev, char *page)
NeilBrown014236d2006-01-06 00:20:55 -08003173{
Shaohua Lif2076e72015-10-08 21:54:12 -07003174 if (test_bit(Journal, &rdev->flags))
3175 return sprintf(page, "journal\n");
3176 else if (rdev->raid_disk < 0)
NeilBrown014236d2006-01-06 00:20:55 -08003177 return sprintf(page, "none\n");
3178 else
3179 return sprintf(page, "%d\n", rdev->raid_disk);
3180}
3181
3182static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003183slot_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown014236d2006-01-06 00:20:55 -08003184{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003185 int slot;
NeilBrownc303da62008-02-06 01:39:51 -08003186 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003187
Shaohua Lif2076e72015-10-08 21:54:12 -07003188 if (test_bit(Journal, &rdev->flags))
3189 return -EBUSY;
NeilBrown014236d2006-01-06 00:20:55 -08003190 if (strncmp(buf, "none", 4)==0)
3191 slot = -1;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003192 else {
3193 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3194 if (err < 0)
3195 return err;
3196 }
Neil Brown6c2fce22008-06-28 08:31:31 +10003197 if (rdev->mddev->pers && slot == -1) {
NeilBrownc303da62008-02-06 01:39:51 -08003198 /* Setting 'slot' on an active array requires also
3199 * updating the 'rd%d' link, and communicating
3200		 * with the personality via ->hot_*_disk.
3201 * For now we only support removing
3202 * failed/spare devices. This normally happens automatically,
3203 * but not when the metadata is externally managed.
3204 */
NeilBrownc303da62008-02-06 01:39:51 -08003205 if (rdev->raid_disk == -1)
3206 return -EEXIST;
3207 /* personality does all needed checks */
Namhyung Kim01393f32011-06-09 11:42:54 +10003208 if (rdev->mddev->pers->hot_remove_disk == NULL)
NeilBrownc303da62008-02-06 01:39:51 -08003209 return -EINVAL;
NeilBrown746d3202013-04-24 11:42:41 +10003210 clear_bit(Blocked, &rdev->flags);
3211 remove_and_add_spares(rdev->mddev, rdev);
3212 if (rdev->raid_disk >= 0)
3213 return -EBUSY;
NeilBrownc303da62008-02-06 01:39:51 -08003214 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3215 md_wakeup_thread(rdev->mddev->thread);
Neil Brown6c2fce22008-06-28 08:31:31 +10003216 } else if (rdev->mddev->pers) {
Neil Brown6c2fce22008-06-28 08:31:31 +10003217 /* Activating a spare .. or possibly reactivating
NeilBrown6d56e272009-04-14 12:01:57 +10003218 * if we ever get bitmaps working here.
Neil Brown6c2fce22008-06-28 08:31:31 +10003219 */
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003220 int err;
Neil Brown6c2fce22008-06-28 08:31:31 +10003221
3222 if (rdev->raid_disk != -1)
3223 return -EBUSY;
3224
NeilBrownc6751b22011-02-02 11:57:13 +11003225 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3226 return -EBUSY;
3227
Neil Brown6c2fce22008-06-28 08:31:31 +10003228 if (rdev->mddev->pers->hot_add_disk == NULL)
3229 return -EINVAL;
3230
NeilBrownba1b41b2011-01-14 09:14:34 +11003231 if (slot >= rdev->mddev->raid_disks &&
3232 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3233 return -ENOSPC;
3234
Neil Brown6c2fce22008-06-28 08:31:31 +10003235 rdev->raid_disk = slot;
3236 if (test_bit(In_sync, &rdev->flags))
3237 rdev->saved_raid_disk = slot;
3238 else
3239 rdev->saved_raid_disk = -1;
NeilBrownd30519f2011-10-18 12:13:47 +11003240 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11003241 clear_bit(Bitmap_sync, &rdev->flags);
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02003242 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003243 if (err) {
3244 rdev->raid_disk = -1;
3245 return err;
3246 } else
3247 sysfs_notify_dirent_safe(rdev->sysfs_state);
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09003248		/* failure here is OK */
3249 sysfs_link_rdev(rdev->mddev, rdev);
Neil Brown6c2fce22008-06-28 08:31:31 +10003250 /* don't wakeup anyone, leave that to userspace. */
NeilBrownc303da62008-02-06 01:39:51 -08003251 } else {
NeilBrownba1b41b2011-01-14 09:14:34 +11003252 if (slot >= rdev->mddev->raid_disks &&
3253 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
NeilBrownc303da62008-02-06 01:39:51 -08003254 return -ENOSPC;
3255 rdev->raid_disk = slot;
3256 /* assume it is working */
NeilBrownc5d79ad2008-02-06 01:39:54 -08003257 clear_bit(Faulty, &rdev->flags);
3258 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc303da62008-02-06 01:39:51 -08003259 set_bit(In_sync, &rdev->flags);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003260 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrownc303da62008-02-06 01:39:51 -08003261 }
NeilBrown014236d2006-01-06 00:20:55 -08003262 return len;
3263}
3264
NeilBrown014236d2006-01-06 00:20:55 -08003265static struct rdev_sysfs_entry rdev_slot =
NeilBrown80ca3a42006-07-10 04:44:18 -07003266__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
NeilBrown014236d2006-01-06 00:20:55 -08003267
NeilBrown93c8cad2006-01-06 00:20:56 -08003268static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003269offset_show(struct md_rdev *rdev, char *page)
NeilBrown93c8cad2006-01-06 00:20:56 -08003270{
Andrew Morton6961ece2006-01-06 00:20:59 -08003271 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
NeilBrown93c8cad2006-01-06 00:20:56 -08003272}
3273
3274static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003275offset_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown93c8cad2006-01-06 00:20:56 -08003276{
NeilBrownc6563a82012-05-21 09:27:00 +10003277 unsigned long long offset;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003278 if (kstrtoull(buf, 10, &offset) < 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003279 return -EINVAL;
Neil Brown8ed0a522008-06-28 08:31:29 +10003280 if (rdev->mddev->pers && rdev->raid_disk >= 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003281 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003282 if (rdev->sectors && rdev->mddev->external)
NeilBrownc5d79ad2008-02-06 01:39:54 -08003283 /* Must set offset before size, so overlap checks
3284 * can be sane */
3285 return -EBUSY;
NeilBrown93c8cad2006-01-06 00:20:56 -08003286 rdev->data_offset = offset;
NeilBrown25f7fd42012-07-19 15:59:18 +10003287 rdev->new_data_offset = offset;
NeilBrown93c8cad2006-01-06 00:20:56 -08003288 return len;
3289}
3290
3291static struct rdev_sysfs_entry rdev_offset =
NeilBrown80ca3a42006-07-10 04:44:18 -07003292__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
NeilBrown93c8cad2006-01-06 00:20:56 -08003293
NeilBrownc6563a82012-05-21 09:27:00 +10003294static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3295{
3296 return sprintf(page, "%llu\n",
3297 (unsigned long long)rdev->new_data_offset);
3298}
3299
3300static ssize_t new_offset_store(struct md_rdev *rdev,
3301 const char *buf, size_t len)
3302{
3303 unsigned long long new_offset;
3304 struct mddev *mddev = rdev->mddev;
3305
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003306 if (kstrtoull(buf, 10, &new_offset) < 0)
NeilBrownc6563a82012-05-21 09:27:00 +10003307 return -EINVAL;
3308
NeilBrownf851b602014-12-11 10:02:10 +11003309 if (mddev->sync_thread ||
3310 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
NeilBrownc6563a82012-05-21 09:27:00 +10003311 return -EBUSY;
3312 if (new_offset == rdev->data_offset)
3313 /* reset is always permitted */
3314 ;
3315 else if (new_offset > rdev->data_offset) {
3316 /* must not push array size beyond rdev_sectors */
3317 if (new_offset - rdev->data_offset
3318 + mddev->dev_sectors > rdev->sectors)
3319 return -E2BIG;
3320 }
3321 /* Metadata worries about other space details. */
3322
3323 /* decreasing the offset is inconsistent with a backwards
3324 * reshape.
3325 */
3326 if (new_offset < rdev->data_offset &&
3327 mddev->reshape_backwards)
3328 return -EINVAL;
3329 /* Increasing offset is inconsistent with forwards
3330 * reshape. reshape_direction should be set to
3331 * 'backwards' first.
3332 */
3333 if (new_offset > rdev->data_offset &&
3334 !mddev->reshape_backwards)
3335 return -EINVAL;
3336
3337 if (mddev->pers && mddev->persistent &&
3338 !super_types[mddev->major_version]
3339 .allow_new_offset(rdev, new_offset))
3340 return -E2BIG;
3341 rdev->new_data_offset = new_offset;
3342 if (new_offset > rdev->data_offset)
3343 mddev->reshape_backwards = 1;
3344 else if (new_offset < rdev->data_offset)
3345 mddev->reshape_backwards = 0;
3346
3347 return len;
3348}
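/*
 * Summary of the rules above (added for clarity, not in the original):
 *
 *	new_offset == data_offset   always allowed (reset)
 *	new_offset  > data_offset   needs reshape_backwards already set,
 *	                            and the array must still fit within
 *	                            rdev->sectors (-E2BIG otherwise)
 *	new_offset  < data_offset   needs reshape_backwards clear
 *
 * On success the direction is re-derived from the sign of the move and
 * the metadata handler gets a veto via ->allow_new_offset().
 */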
3349static struct rdev_sysfs_entry rdev_new_offset =
3350__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3351
NeilBrown83303b62006-01-06 00:21:06 -08003352static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003353rdev_size_show(struct md_rdev *rdev, char *page)
NeilBrown83303b62006-01-06 00:21:06 -08003354{
Andre Nolldd8ac332009-03-31 14:33:13 +11003355 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
NeilBrown83303b62006-01-06 00:21:06 -08003356}
3357
NeilBrownc5d79ad2008-02-06 01:39:54 -08003358static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3359{
3360 /* check if two start/length pairs overlap */
3361 if (s1+l1 <= s2)
3362 return 0;
3363 if (s2+l2 <= s1)
3364 return 0;
3365 return 1;
3366}
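/*
 * Illustrative examples (not from the original source); each pair is
 * treated as a half-open interval [s, s+l):
 *
 *	overlaps(0, 100, 100, 50) -> 0  (they only touch at sector 100)
 *	overlaps(0, 100,  99, 50) -> 1  (sector 99 is shared)
 *	overlaps(50, 10,  0, 100) -> 1  (containment counts as overlap)
 */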
3367
Dan Williamsb522adc2009-03-31 15:00:31 +11003368static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3369{
3370 unsigned long long blocks;
3371 sector_t new;
3372
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003373 if (kstrtoull(buf, 10, &blocks) < 0)
Dan Williamsb522adc2009-03-31 15:00:31 +11003374 return -EINVAL;
3375
3376 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3377 return -EINVAL; /* sector conversion overflow */
3378
3379 new = blocks * 2;
3380 if (new != blocks * 2)
3381 return -EINVAL; /* unsigned long long to sector_t overflow */
3382
3383 *sectors = new;
3384 return 0;
3385}
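/*
 * Worked example (illustrative only): writing "1048576" (1 GiB in 1K
 * blocks) yields 2097152 sectors of 512 bytes.  The first check
 * rejects values whose top bit is set, since doubling them cannot be
 * represented in 64 bits; the second catches truncation when sector_t
 * is narrower than unsigned long long (32-bit configurations).
 */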
3386
NeilBrown83303b62006-01-06 00:21:06 -08003387static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003388rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown83303b62006-01-06 00:21:06 -08003389{
NeilBrownfd01b882011-10-11 16:47:53 +11003390 struct mddev *my_mddev = rdev->mddev;
Andre Nolldd8ac332009-03-31 14:33:13 +11003391 sector_t oldsectors = rdev->sectors;
Dan Williamsb522adc2009-03-31 15:00:31 +11003392 sector_t sectors;
NeilBrown27c529b2008-03-04 14:29:33 -08003393
Shaohua Lif2076e72015-10-08 21:54:12 -07003394 if (test_bit(Journal, &rdev->flags))
3395 return -EBUSY;
Dan Williamsb522adc2009-03-31 15:00:31 +11003396 if (strict_blocks_to_sectors(buf, &sectors) < 0)
Neil Brownd7027452008-07-12 10:37:50 +10003397 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10003398 if (rdev->data_offset != rdev->new_data_offset)
3399 return -EINVAL; /* too confusing */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003400 if (my_mddev->pers && rdev->raid_disk >= 0) {
Neil Brownd7027452008-07-12 10:37:50 +10003401 if (my_mddev->persistent) {
Andre Nolldd8ac332009-03-31 14:33:13 +11003402 sectors = super_types[my_mddev->major_version].
3403 rdev_size_change(rdev, sectors);
3404 if (!sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10003405 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003406 } else if (!sectors)
Mike Snitzer77304d22010-11-08 14:39:12 +01003407 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
Andre Nolldd8ac332009-03-31 14:33:13 +11003408 rdev->data_offset;
NeilBrowna6468532013-02-21 14:33:17 +11003409 if (!my_mddev->pers->resize)
3410 /* Cannot change size for RAID0 or Linear etc */
3411 return -EINVAL;
Chris Webb0cd17fe2008-06-28 08:31:46 +10003412 }
Andre Nolldd8ac332009-03-31 14:33:13 +11003413 if (sectors < my_mddev->dev_sectors)
Chris Webb7d3c6f82008-10-13 11:55:11 +11003414 return -EINVAL; /* component must fit device */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003415
Andre Nolldd8ac332009-03-31 14:33:13 +11003416 rdev->sectors = sectors;
3417 if (sectors > oldsectors && my_mddev->external) {
NeilBrown8b1afc32014-09-29 15:33:20 +10003418 /* Need to check that all other rdevs with the same
3419 * ->bdev do not overlap. 'rcu' is sufficient to walk
3420 * the rdev lists safely.
3421 * This check does not provide a hard guarantee, it
3422 * just helps avoid dangerous mistakes.
NeilBrownc5d79ad2008-02-06 01:39:54 -08003423 */
NeilBrownfd01b882011-10-11 16:47:53 +11003424 struct mddev *mddev;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003425 int overlap = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11003426 struct list_head *tmp;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003427
NeilBrown8b1afc32014-09-29 15:33:20 +10003428 rcu_read_lock();
NeilBrown29ac4aa2008-02-06 01:39:58 -08003429 for_each_mddev(mddev, tmp) {
NeilBrown3cb03002011-10-11 16:45:26 +11003430 struct md_rdev *rdev2;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003431
NeilBrowndafb20f2012-03-19 12:46:39 +11003432 rdev_for_each(rdev2, mddev)
NeilBrownf21e9ff2011-01-31 12:10:09 +11003433 if (rdev->bdev == rdev2->bdev &&
3434 rdev != rdev2 &&
3435 overlaps(rdev->data_offset, rdev->sectors,
3436 rdev2->data_offset,
3437 rdev2->sectors)) {
NeilBrownc5d79ad2008-02-06 01:39:54 -08003438 overlap = 1;
3439 break;
3440 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08003441 if (overlap) {
3442 mddev_put(mddev);
3443 break;
3444 }
3445 }
NeilBrown8b1afc32014-09-29 15:33:20 +10003446 rcu_read_unlock();
NeilBrownc5d79ad2008-02-06 01:39:54 -08003447 if (overlap) {
3448 /* Someone else could have slipped in a size
3449 * change here, but doing so is just silly.
Andre Nolldd8ac332009-03-31 14:33:13 +11003450 * We put oldsectors back because we *know* it is
NeilBrownc5d79ad2008-02-06 01:39:54 -08003451 * safe, and trust userspace not to race with
3452 * itself
3453 */
Andre Nolldd8ac332009-03-31 14:33:13 +11003454 rdev->sectors = oldsectors;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003455 return -EBUSY;
3456 }
3457 }
NeilBrown83303b62006-01-06 00:21:06 -08003458 return len;
3459}
3460
3461static struct rdev_sysfs_entry rdev_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07003462__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
NeilBrown83303b62006-01-06 00:21:06 -08003463
NeilBrown3cb03002011-10-11 16:45:26 +11003464static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
Dan Williams06e3c812009-12-12 21:17:12 -07003465{
3466 unsigned long long recovery_start = rdev->recovery_offset;
3467
3468 if (test_bit(In_sync, &rdev->flags) ||
3469 recovery_start == MaxSector)
3470 return sprintf(page, "none\n");
3471
3472 return sprintf(page, "%llu\n", recovery_start);
3473}
3474
NeilBrown3cb03002011-10-11 16:45:26 +11003475static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
Dan Williams06e3c812009-12-12 21:17:12 -07003476{
3477 unsigned long long recovery_start;
3478
3479 if (cmd_match(buf, "none"))
3480 recovery_start = MaxSector;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003481 else if (kstrtoull(buf, 10, &recovery_start))
Dan Williams06e3c812009-12-12 21:17:12 -07003482 return -EINVAL;
3483
3484 if (rdev->mddev->pers &&
3485 rdev->raid_disk >= 0)
3486 return -EBUSY;
3487
3488 rdev->recovery_offset = recovery_start;
3489 if (recovery_start == MaxSector)
3490 set_bit(In_sync, &rdev->flags);
3491 else
3492 clear_bit(In_sync, &rdev->flags);
3493 return len;
3494}
3495
3496static struct rdev_sysfs_entry rdev_recovery_start =
3497__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3498
Vishal Vermafc974ee2015-12-24 19:20:34 -07003499/* sysfs access to bad-blocks list.
3500 * We present two files.
3501 * 'bad-blocks' lists sector numbers and lengths of ranges that
3502 * are recorded as bad. The list is truncated to fit within
3503 * the one-page limit of sysfs.
3504 * Writing "sector length" to this file adds an acknowledged
3505 * bad block to the list.
3506 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3507 * been acknowledged. Writing to this file adds bad blocks
3508 * without acknowledging them. This is largely for testing.
3509 */
NeilBrown3cb03002011-10-11 16:45:26 +11003510static ssize_t bb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003511{
3512 return badblocks_show(&rdev->badblocks, page, 0);
3513}
NeilBrown3cb03002011-10-11 16:45:26 +11003514static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003515{
NeilBrownde393cd2011-07-28 11:31:48 +10003516 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3517 /* Maybe that ack was all we needed */
3518 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3519 wake_up(&rdev->blocked_wait);
3520 return rv;
NeilBrown16c791a2011-07-28 11:31:47 +10003521}
3522static struct rdev_sysfs_entry rdev_bad_blocks =
3523__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3524
NeilBrown3cb03002011-10-11 16:45:26 +11003525static ssize_t ubb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003526{
3527 return badblocks_show(&rdev->badblocks, page, 1);
3528}
NeilBrown3cb03002011-10-11 16:45:26 +11003529static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003530{
3531 return badblocks_store(&rdev->badblocks, page, len, 1);
3532}
3533static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3534__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
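
/* Illustrative userspace sketch (hypothetical array and member paths):
 * both attributes live under each member device's sysfs directory.
 * To record 8 sectors starting at sector 4096 as an acknowledged bad
 * range:
 *
 *	int fd = open("/sys/block/md0/md/dev-sda1/bad_blocks", O_RDWR);
 *	write(fd, "4096 8", 6);
 *	close(fd);
 *
 * Writing the same string to unacknowledged_bad_blocks records the
 * range without acknowledging it, which is mainly useful for testing.
 */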
3535
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003536static ssize_t
3537ppl_sector_show(struct md_rdev *rdev, char *page)
3538{
3539 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3540}
3541
3542static ssize_t
3543ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3544{
3545 unsigned long long sector;
3546
3547 if (kstrtoull(buf, 10, &sector) < 0)
3548 return -EINVAL;
3549 if (sector != (sector_t)sector)
3550 return -EINVAL;
3551
3552 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3553 rdev->raid_disk >= 0)
3554 return -EBUSY;
3555
3556 if (rdev->mddev->persistent) {
3557 if (rdev->mddev->major_version == 0)
3558 return -EINVAL;
3559 if ((sector > rdev->sb_start &&
3560 sector - rdev->sb_start > S16_MAX) ||
3561 (sector < rdev->sb_start &&
3562 rdev->sb_start - sector > -S16_MIN))
3563 return -EINVAL;
3564 rdev->ppl.offset = sector - rdev->sb_start;
3565 } else if (!rdev->mddev->external) {
3566 return -EBUSY;
3567 }
3568 rdev->ppl.sector = sector;
3569 return len;
3570}
3571
3572static struct rdev_sysfs_entry rdev_ppl_sector =
3573__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3574
3575static ssize_t
3576ppl_size_show(struct md_rdev *rdev, char *page)
3577{
3578 return sprintf(page, "%u\n", rdev->ppl.size);
3579}
3580
3581static ssize_t
3582ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3583{
3584 unsigned int size;
3585
3586 if (kstrtouint(buf, 10, &size) < 0)
3587 return -EINVAL;
3588
3589 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3590 rdev->raid_disk >= 0)
3591 return -EBUSY;
3592
3593 if (rdev->mddev->persistent) {
3594 if (rdev->mddev->major_version == 0)
3595 return -EINVAL;
3596 if (size > U16_MAX)
3597 return -EINVAL;
3598 } else if (!rdev->mddev->external) {
3599 return -EBUSY;
3600 }
3601 rdev->ppl.size = size;
3602 return len;
3603}
3604
3605static struct rdev_sysfs_entry rdev_ppl_size =
3606__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3607
NeilBrown86e6ffd2005-11-08 21:39:24 -08003608static struct attribute *rdev_default_attrs[] = {
3609 &rdev_state.attr,
NeilBrown4dbcdc72006-01-06 00:20:52 -08003610 &rdev_errors.attr,
NeilBrown014236d2006-01-06 00:20:55 -08003611 &rdev_slot.attr,
NeilBrown93c8cad2006-01-06 00:20:56 -08003612 &rdev_offset.attr,
NeilBrownc6563a82012-05-21 09:27:00 +10003613 &rdev_new_offset.attr,
NeilBrown83303b62006-01-06 00:21:06 -08003614 &rdev_size.attr,
Dan Williams06e3c812009-12-12 21:17:12 -07003615 &rdev_recovery_start.attr,
NeilBrown16c791a2011-07-28 11:31:47 +10003616 &rdev_bad_blocks.attr,
3617 &rdev_unack_bad_blocks.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003618 &rdev_ppl_sector.attr,
3619 &rdev_ppl_size.attr,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003620 NULL,
3621};
3622static ssize_t
3623rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3624{
3625 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003626 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003627
3628 if (!entry->show)
3629 return -EIO;
NeilBrown758bfc82014-12-15 12:56:59 +11003630 if (!rdev->mddev)
Marcos Paulo de Souza168b3052019-06-14 15:41:06 -07003631 return -ENODEV;
NeilBrown758bfc82014-12-15 12:56:59 +11003632 return entry->show(rdev, page);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003633}
3634
3635static ssize_t
3636rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3637 const char *page, size_t length)
3638{
3639 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003640 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown27c529b2008-03-04 14:29:33 -08003641 ssize_t rv;
NeilBrownfd01b882011-10-11 16:47:53 +11003642 struct mddev *mddev = rdev->mddev;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003643
3644 if (!entry->store)
3645 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07003646 if (!capable(CAP_SYS_ADMIN))
3647 return -EACCES;
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003648 rv = mddev ? mddev_lock(mddev) : -ENODEV;
NeilBrownca388052008-02-06 01:39:55 -08003649 if (!rv) {
NeilBrown27c529b2008-03-04 14:29:33 -08003650 if (rdev->mddev == NULL)
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003651 rv = -ENODEV;
NeilBrown27c529b2008-03-04 14:29:33 -08003652 else
3653 rv = entry->store(rdev, page, length);
Dan Williams6a518302008-04-30 00:52:28 -07003654 mddev_unlock(mddev);
NeilBrownca388052008-02-06 01:39:55 -08003655 }
3656 return rv;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003657}
3658
3659static void rdev_free(struct kobject *ko)
3660{
NeilBrown3cb03002011-10-11 16:45:26 +11003661 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003662 kfree(rdev);
3663}
Emese Revfy52cf25d2010-01-19 02:58:23 +01003664static const struct sysfs_ops rdev_sysfs_ops = {
NeilBrown86e6ffd2005-11-08 21:39:24 -08003665 .show = rdev_attr_show,
3666 .store = rdev_attr_store,
3667};
3668static struct kobj_type rdev_ktype = {
3669 .release = rdev_free,
3670 .sysfs_ops = &rdev_sysfs_ops,
3671 .default_attrs = rdev_default_attrs,
3672};
3673
NeilBrown3cb03002011-10-11 16:45:26 +11003674int md_rdev_init(struct md_rdev *rdev)
NeilBrowne8bb9a82010-06-01 19:37:26 +10003675{
3676 rdev->desc_nr = -1;
3677 rdev->saved_raid_disk = -1;
3678 rdev->raid_disk = -1;
3679 rdev->flags = 0;
3680 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10003681 rdev->new_data_offset = 0;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003682 rdev->sb_events = 0;
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02003683 rdev->last_read_error = 0;
NeilBrown2699b672011-07-28 11:31:47 +10003684 rdev->sb_loaded = 0;
3685 rdev->bb_page = NULL;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003686 atomic_set(&rdev->nr_pending, 0);
3687 atomic_set(&rdev->read_errors, 0);
3688 atomic_set(&rdev->corrected_errors, 0);
3689
3690 INIT_LIST_HEAD(&rdev->same_set);
3691 init_waitqueue_head(&rdev->blocked_wait);
NeilBrown2230dfe2011-07-28 11:31:46 +10003692
3693 /* Add space to store bad block list.
3694 * This reserves the space even on arrays where it cannot
3695 * be used - I wonder if that matters
3696 */
Vishal Vermafc974ee2015-12-24 19:20:34 -07003697 return badblocks_init(&rdev->badblocks, 0);
NeilBrowne8bb9a82010-06-01 19:37:26 +10003698}
3699EXPORT_SYMBOL_GPL(md_rdev_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700/*
3701 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3702 *
3703 * mark the device faulty if:
3704 *
3705 * - the device is nonexistent (zero size)
3706 * - the device has no valid superblock
3707 *
3708 * a faulty rdev _never_ has rdev->sb set.
3709 */
NeilBrown3cb03002011-10-11 16:45:26 +11003710static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711{
3712 char b[BDEVNAME_SIZE];
3713 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11003714 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715 sector_t size;
3716
NeilBrown9ffae0c2006-01-06 00:20:32 -08003717 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
NeilBrown9d487392016-11-02 14:16:49 +11003718 if (!rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720
NeilBrown2230dfe2011-07-28 11:31:46 +10003721 err = md_rdev_init(rdev);
3722 if (err)
3723 goto abort_free;
3724 err = alloc_disk_sb(rdev);
3725 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726 goto abort_free;
3727
NeilBrownc5d79ad2008-02-06 01:39:54 -08003728 err = lock_rdev(rdev, newdev, super_format == -2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729 if (err)
3730 goto abort_free;
3731
Greg Kroah-Hartmanf9cb0742007-12-17 23:05:35 -07003732 kobject_init(&rdev->kobj, &rdev_ktype);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003733
Mike Snitzer77304d22010-11-08 14:39:12 +01003734 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 if (!size) {
NeilBrown9d487392016-11-02 14:16:49 +11003736 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737 bdevname(rdev->bdev,b));
3738 err = -EINVAL;
3739 goto abort_free;
3740 }
3741
3742 if (super_format >= 0) {
3743 err = super_types[super_format].
3744 load_super(rdev, NULL, super_minor);
3745 if (err == -EINVAL) {
NeilBrown9d487392016-11-02 14:16:49 +11003746 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
NeilBrowndf968c42007-07-17 04:06:11 -07003747 bdevname(rdev->bdev,b),
NeilBrown9d487392016-11-02 14:16:49 +11003748 super_format, super_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749 goto abort_free;
3750 }
3751 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11003752 pr_warn("md: could not read %s's sb, not importing!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753 bdevname(rdev->bdev,b));
3754 goto abort_free;
3755 }
3756 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003757
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 return rdev;
3759
3760abort_free:
NeilBrown2699b672011-07-28 11:31:47 +10003761 if (rdev->bdev)
3762 unlock_rdev(rdev);
NeilBrown545c8792012-05-22 13:54:30 +10003763 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 kfree(rdev);
3765 return ERR_PTR(err);
3766}
3767
3768/*
3769 * Check a full RAID array for plausibility
3770 */
3771
Yufen Yu6a5cb532019-10-16 16:00:03 +08003772static int analyze_sbs(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773{
3774 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11003775 struct md_rdev *rdev, *freshest, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776 char b[BDEVNAME_SIZE];
3777
3778 freshest = NULL;
NeilBrowndafb20f2012-03-19 12:46:39 +11003779 rdev_for_each_safe(rdev, tmp, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 switch (super_types[mddev->major_version].
3781 load_super(rdev, freshest, mddev->minor_version)) {
3782 case 1:
3783 freshest = rdev;
3784 break;
3785 case 0:
3786 break;
3787 default:
NeilBrown9d487392016-11-02 14:16:49 +11003788 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003790 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791 }
3792
Yufen Yu6a5cb532019-10-16 16:00:03 +08003793 /* Cannot find a valid fresh disk */
3794 if (!freshest) {
3795 pr_warn("md: cannot find a valid disk\n");
3796 return -EINVAL;
3797 }
3798
Linus Torvalds1da177e2005-04-16 15:20:36 -07003799 super_types[mddev->major_version].
3800 validate_super(mddev, freshest);
3801
3802 i = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11003803 rdev_for_each_safe(rdev, tmp, mddev) {
NeilBrown233fca32010-04-14 17:02:09 +10003804 if (mddev->max_disks &&
3805 (rdev->desc_nr >= mddev->max_disks ||
3806 i > mddev->max_disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11003807 pr_warn("md: %s: %s: only %d devices permitted\n",
3808 mdname(mddev), bdevname(rdev->bdev, b),
3809 mddev->max_disks);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003810 md_kick_rdev_from_array(rdev);
NeilBrownde01dfa2009-02-06 18:02:46 +11003811 continue;
3812 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003813 if (rdev != freshest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 if (super_types[mddev->major_version].
3815 validate_super(mddev, rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003816 pr_warn("md: kicking non-fresh %s from array!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003817 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003818 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819 continue;
3820 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003821 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822 if (mddev->level == LEVEL_MULTIPATH) {
3823 rdev->desc_nr = i++;
3824 rdev->raid_disk = rdev->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08003825 set_bit(In_sync, &rdev->flags);
Shaohua Lif2076e72015-10-08 21:54:12 -07003826 } else if (rdev->raid_disk >=
3827 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3828 !test_bit(Journal, &rdev->flags)) {
NeilBrowna778b732007-05-23 13:58:10 -07003829 rdev->raid_disk = -1;
3830 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831 }
3832 }
Yufen Yu6a5cb532019-10-16 16:00:03 +08003833
3834 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835}
3836
NeilBrown72e02072009-12-14 12:49:55 +11003837/* Read a fixed-point number.
3838 * Numbers in sysfs attributes should be in "standard" units where
3839 * possible, so time should be in seconds.
NeilBrownf72ffdd2014-09-30 14:23:59 +10003840 * However we internally use a much smaller unit such as
NeilBrown72e02072009-12-14 12:49:55 +11003841 * milliseconds or jiffies.
3842 * This function takes a decimal number with a possible fractional
3843 * component, and produces an integer which is the result of
3844 * multiplying that number by 10^'scale',
3845 * all without any floating-point arithmetic.
3846 */
3847int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3848{
3849 unsigned long result = 0;
3850 long decimals = -1;
3851 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3852 if (*cp == '.')
3853 decimals = 0;
3854 else if (decimals < scale) {
3855 unsigned int value;
3856 value = *cp - '0';
3857 result = result * 10 + value;
3858 if (decimals >= 0)
3859 decimals++;
3860 }
3861 cp++;
3862 }
3863 if (*cp == '\n')
3864 cp++;
3865 if (*cp)
3866 return -EINVAL;
3867 if (decimals < 0)
3868 decimals = 0;
Andy Shevchenkocf891602019-07-23 23:41:55 +03003869 *res = result * int_pow(10, scale - decimals);
NeilBrown72e02072009-12-14 12:49:55 +11003870 return 0;
3871}
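
/* Illustrative sketch, not part of the original file: with scale 3
 * (as safe_delay_store() below uses for seconds -> milliseconds) the
 * parse is pure integer arithmetic on the digits either side of '.'.
 */
static inline void strict_strtoul_scaled_example(void)
{
	unsigned long v;

	WARN_ON(strict_strtoul_scaled("1.5", &v, 3) || v != 1500);
	WARN_ON(strict_strtoul_scaled("0.001", &v, 3) || v != 1);
	WARN_ON(strict_strtoul_scaled("20", &v, 3) || v != 20000);
}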
3872
NeilBrowneae17012005-11-08 21:39:23 -08003873static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003874safe_delay_show(struct mddev *mddev, char *page)
NeilBrown16f17b32006-06-26 00:27:37 -07003875{
3876 int msec = (mddev->safemode_delay*1000)/HZ;
3877 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3878}
3879static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003880safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
NeilBrown16f17b32006-06-26 00:27:37 -07003881{
NeilBrown16f17b32006-06-26 00:27:37 -07003882 unsigned long msec;
Dan Williams97ce0a72008-09-24 22:48:19 -07003883
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003884 if (mddev_is_clustered(mddev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003885 pr_warn("md: Safemode is disabled for clustered mode\n");
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003886 return -EINVAL;
3887 }
3888
NeilBrown72e02072009-12-14 12:49:55 +11003889 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
NeilBrown16f17b32006-06-26 00:27:37 -07003890 return -EINVAL;
NeilBrown16f17b32006-06-26 00:27:37 -07003891 if (msec == 0)
3892 mddev->safemode_delay = 0;
3893 else {
NeilBrown19052c02008-08-05 15:54:13 +10003894 unsigned long old_delay = mddev->safemode_delay;
NeilBrown1b30e662014-12-15 12:57:00 +11003895 unsigned long new_delay = (msec*HZ)/1000;
3896
3897 if (new_delay == 0)
3898 new_delay = 1;
3899 mddev->safemode_delay = new_delay;
3900 if (new_delay < old_delay || old_delay == 0)
3901 mod_timer(&mddev->safemode_timer, jiffies+1);
NeilBrown16f17b32006-06-26 00:27:37 -07003902 }
3903 return len;
3904}
3905static struct md_sysfs_entry md_safe_delay =
NeilBrown80ca3a42006-07-10 04:44:18 -07003906__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
NeilBrown16f17b32006-06-26 00:27:37 -07003907
3908static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003909level_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08003910{
NeilBrown36d091f2014-12-15 12:56:58 +11003911 struct md_personality *p;
3912 int ret;
3913 spin_lock(&mddev->lock);
3914 p = mddev->pers;
NeilBrownd9d166c2006-01-06 00:20:51 -08003915 if (p)
NeilBrown36d091f2014-12-15 12:56:58 +11003916 ret = sprintf(page, "%s\n", p->name);
NeilBrownd9d166c2006-01-06 00:20:51 -08003917 else if (mddev->clevel[0])
NeilBrown36d091f2014-12-15 12:56:58 +11003918 ret = sprintf(page, "%s\n", mddev->clevel);
NeilBrownd9d166c2006-01-06 00:20:51 -08003919 else if (mddev->level != LEVEL_NONE)
NeilBrown36d091f2014-12-15 12:56:58 +11003920 ret = sprintf(page, "%d\n", mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08003921 else
NeilBrown36d091f2014-12-15 12:56:58 +11003922 ret = 0;
3923 spin_unlock(&mddev->lock);
3924 return ret;
NeilBrowneae17012005-11-08 21:39:23 -08003925}
3926
NeilBrownd9d166c2006-01-06 00:20:51 -08003927static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003928level_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd9d166c2006-01-06 00:20:51 -08003929{
Dan Williamsf2859af2010-05-02 10:04:16 -07003930 char clevel[16];
NeilBrown67918752014-12-15 12:57:01 +11003931 ssize_t rv;
3932 size_t slen = len;
NeilBrowndb721d32014-12-15 12:56:58 +11003933 struct md_personality *pers, *oldpers;
Dan Williamsf2859af2010-05-02 10:04:16 -07003934 long level;
NeilBrowndb721d32014-12-15 12:56:58 +11003935 void *priv, *oldpriv;
NeilBrown3cb03002011-10-11 16:45:26 +11003936 struct md_rdev *rdev;
NeilBrown245f46c2009-03-31 14:39:39 +11003937
NeilBrown67918752014-12-15 12:57:01 +11003938 if (slen == 0 || slen >= sizeof(clevel))
3939 return -EINVAL;
3940
3941 rv = mddev_lock(mddev);
3942 if (rv)
NeilBrown245f46c2009-03-31 14:39:39 +11003943 return rv;
NeilBrown67918752014-12-15 12:57:01 +11003944
3945 if (mddev->pers == NULL) {
3946 strncpy(mddev->clevel, buf, slen);
3947 if (mddev->clevel[slen-1] == '\n')
3948 slen--;
3949 mddev->clevel[slen] = 0;
3950 mddev->level = LEVEL_NONE;
3951 rv = len;
3952 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003953 }
NeilBrown67918752014-12-15 12:57:01 +11003954 rv = -EROFS;
NeilBrownbd8839e2014-05-28 13:39:21 +10003955 if (mddev->ro)
NeilBrown67918752014-12-15 12:57:01 +11003956 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003957
3958 /* request to change the personality. Need to ensure:
3959 * - array is not engaged in resync/recovery/reshape
3960 * - old personality can be suspended
3961 * - new personality will access other array.
3962 */
3963
NeilBrown67918752014-12-15 12:57:01 +11003964 rv = -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10003965 if (mddev->sync_thread ||
NeilBrownf851b602014-12-11 10:02:10 +11003966 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
NeilBrownbb4f1e92010-08-08 21:18:03 +10003967 mddev->reshape_position != MaxSector ||
3968 mddev->sysfs_active)
NeilBrown67918752014-12-15 12:57:01 +11003969 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003970
NeilBrown67918752014-12-15 12:57:01 +11003971 rv = -EINVAL;
NeilBrown245f46c2009-03-31 14:39:39 +11003972 if (!mddev->pers->quiesce) {
NeilBrown9d487392016-11-02 14:16:49 +11003973 pr_warn("md: %s: %s does not support online personality change\n",
3974 mdname(mddev), mddev->pers->name);
NeilBrown67918752014-12-15 12:57:01 +11003975 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003976 }
3977
3978 /* Now find the new personality */
NeilBrown67918752014-12-15 12:57:01 +11003979 strncpy(clevel, buf, slen);
3980 if (clevel[slen-1] == '\n')
3981 slen--;
3982 clevel[slen] = 0;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003983 if (kstrtol(clevel, 10, &level))
Dan Williamsf2859af2010-05-02 10:04:16 -07003984 level = LEVEL_NONE;
NeilBrown245f46c2009-03-31 14:39:39 +11003985
Dan Williamsf2859af2010-05-02 10:04:16 -07003986 if (request_module("md-%s", clevel) != 0)
3987 request_module("md-level-%s", clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003988 spin_lock(&pers_lock);
Dan Williamsf2859af2010-05-02 10:04:16 -07003989 pers = find_pers(level, clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003990 if (!pers || !try_module_get(pers->owner)) {
3991 spin_unlock(&pers_lock);
NeilBrown9d487392016-11-02 14:16:49 +11003992 pr_warn("md: personality %s not loaded\n", clevel);
NeilBrown67918752014-12-15 12:57:01 +11003993 rv = -EINVAL;
3994 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003995 }
3996 spin_unlock(&pers_lock);
3997
3998 if (pers == mddev->pers) {
3999 /* Nothing to do! */
4000 module_put(pers->owner);
NeilBrown67918752014-12-15 12:57:01 +11004001 rv = len;
4002 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004003 }
4004 if (!pers->takeover) {
4005 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004006 pr_warn("md: %s: %s does not support personality takeover\n",
4007 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004008 rv = -EINVAL;
4009 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004010 }
4011
NeilBrowndafb20f2012-03-19 12:46:39 +11004012 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01004013 rdev->new_raid_disk = rdev->raid_disk;
4014
NeilBrown245f46c2009-03-31 14:39:39 +11004015 /* ->takeover must set new_* and/or delta_disks
4016 * if it succeeds, and may set them when it fails.
4017 */
4018 priv = pers->takeover(mddev);
4019 if (IS_ERR(priv)) {
4020 mddev->new_level = mddev->level;
4021 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10004022 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown245f46c2009-03-31 14:39:39 +11004023 mddev->raid_disks -= mddev->delta_disks;
4024 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10004025 mddev->reshape_backwards = 0;
NeilBrown245f46c2009-03-31 14:39:39 +11004026 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004027 pr_warn("md: %s: %s would not accept array\n",
4028 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004029 rv = PTR_ERR(priv);
4030 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004031 }
4032
4033 /* Looks like we have a winner */
4034 mddev_suspend(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11004035 mddev_detach(mddev);
NeilBrown36d091f2014-12-15 12:56:58 +11004036
4037 spin_lock(&mddev->lock);
NeilBrowndb721d32014-12-15 12:56:58 +11004038 oldpers = mddev->pers;
4039 oldpriv = mddev->private;
4040 mddev->pers = pers;
4041 mddev->private = priv;
4042 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4043 mddev->level = mddev->new_level;
4044 mddev->layout = mddev->new_layout;
4045 mddev->chunk_sectors = mddev->new_chunk_sectors;
4046 mddev->delta_disks = 0;
4047 mddev->reshape_backwards = 0;
4048 mddev->degraded = 0;
NeilBrown36d091f2014-12-15 12:56:58 +11004049 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10004050
NeilBrowndb721d32014-12-15 12:56:58 +11004051 if (oldpers->sync_request == NULL &&
Trela Maciej54071b32010-03-08 16:02:42 +11004052 mddev->external) {
4053 /* We are converting from a no-redundancy array
4054 * to a redundancy array and metadata is managed
4055 * externally so we need to be sure that writes
4056 * won't block due to a need to transition
4057 * clean->dirty
4058 * until external management is started.
4059 */
4060 mddev->in_sync = 0;
4061 mddev->safemode_delay = 0;
4062 mddev->safemode = 0;
4063 }
4064
NeilBrowndb721d32014-12-15 12:56:58 +11004065 oldpers->free(mddev, oldpriv);
4066
4067 if (oldpers->sync_request == NULL &&
4068 pers->sync_request != NULL) {
4069 /* need to add the md_redundancy_group */
4070 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11004071 pr_warn("md: cannot register extra attributes for %s\n",
4072 mdname(mddev));
NeilBrowndb721d32014-12-15 12:56:58 +11004073 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07004074 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4075 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrowndb721d32014-12-15 12:56:58 +11004076 }
4077 if (oldpers->sync_request != NULL &&
4078 pers->sync_request == NULL) {
4079 /* need to remove the md_redundancy_group */
4080 if (mddev->to_remove == NULL)
4081 mddev->to_remove = &md_redundancy_group;
4082 }
4083
Alexey Obitotskiy4cb9da72016-06-23 12:11:01 +02004084 module_put(oldpers->owner);
4085
NeilBrowndafb20f2012-03-19 12:46:39 +11004086 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004087 if (rdev->raid_disk < 0)
4088 continue;
NeilBrownbf2cb0d2011-01-14 09:14:34 +11004089 if (rdev->new_raid_disk >= mddev->raid_disks)
NeilBrowne93f68a2010-06-15 09:36:03 +01004090 rdev->new_raid_disk = -1;
4091 if (rdev->new_raid_disk == rdev->raid_disk)
4092 continue;
Namhyung Kim36fad852011-07-27 11:00:36 +10004093 sysfs_unlink_rdev(mddev, rdev);
NeilBrowne93f68a2010-06-15 09:36:03 +01004094 }
NeilBrowndafb20f2012-03-19 12:46:39 +11004095 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004096 if (rdev->raid_disk < 0)
4097 continue;
4098 if (rdev->new_raid_disk == rdev->raid_disk)
4099 continue;
4100 rdev->raid_disk = rdev->new_raid_disk;
4101 if (rdev->raid_disk < 0)
NeilBrown3a981b02009-08-03 10:59:55 +10004102 clear_bit(In_sync, &rdev->flags);
NeilBrowne93f68a2010-06-15 09:36:03 +01004103 else {
Namhyung Kim36fad852011-07-27 11:00:36 +10004104 if (sysfs_link_rdev(mddev, rdev))
NeilBrown9d487392016-11-02 14:16:49 +11004105 pr_warn("md: cannot register rd%d for %s after level change\n",
4106 rdev->raid_disk, mdname(mddev));
NeilBrown3a981b02009-08-03 10:59:55 +10004107 }
NeilBrowne93f68a2010-06-15 09:36:03 +01004108 }
4109
NeilBrowndb721d32014-12-15 12:56:58 +11004110 if (pers->sync_request == NULL) {
Trela, Maciej9af204c2010-03-08 16:02:44 +11004111 /* this is now an array without redundancy, so
4112 * it must always be in_sync
4113 */
4114 mddev->in_sync = 1;
4115 del_timer_sync(&mddev->safemode_timer);
4116 }
NeilBrown02e5f5c2013-11-14 15:16:15 +11004117 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown245f46c2009-03-31 14:39:39 +11004118 pers->run(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004119 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassow47525e52012-05-22 13:55:29 +10004120 mddev_resume(mddev);
NeilBrown830778a2014-01-14 15:17:03 +11004121 if (!mddev->thread)
4122 md_update_sb(mddev, 1);
Junxiao Bie1a86db2020-07-14 16:10:26 -07004123 sysfs_notify_dirent_safe(mddev->sysfs_level);
Dan Williamsbb7f8d22010-05-01 18:14:57 -07004124 md_new_event(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004125 rv = len;
4126out_unlock:
4127 mddev_unlock(mddev);
NeilBrownd9d166c2006-01-06 00:20:51 -08004128 return rv;
4129}
4130
4131static struct md_sysfs_entry md_level =
NeilBrown80ca3a42006-07-10 04:44:18 -07004132__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
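
/* Illustrative userspace sketch (hypothetical array): an online
 * personality change is requested by writing the new level name,
 * e.g. converting a running RAID5 array to RAID6; level_store()
 * loads the md-raid6 module if needed and calls pers->takeover():
 *
 *	int fd = open("/sys/block/md0/md/level", O_WRONLY);
 *	write(fd, "raid6", 5);
 *	close(fd);
 */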
NeilBrowneae17012005-11-08 21:39:23 -08004133
NeilBrownd4dbd022006-06-26 00:27:59 -07004134static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004135layout_show(struct mddev *mddev, char *page)
NeilBrownd4dbd022006-06-26 00:27:59 -07004136{
4137 /* just a number, not meaningful for all levels */
NeilBrown08a02ec2007-05-09 02:35:38 -07004138 if (mddev->reshape_position != MaxSector &&
4139 mddev->layout != mddev->new_layout)
4140 return sprintf(page, "%d (%d)\n",
4141 mddev->new_layout, mddev->layout);
NeilBrownd4dbd022006-06-26 00:27:59 -07004142 return sprintf(page, "%d\n", mddev->layout);
4143}
4144
4145static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004146layout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd4dbd022006-06-26 00:27:59 -07004147{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004148 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004149 int err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004150
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004151 err = kstrtouint(buf, 10, &n);
4152 if (err < 0)
4153 return err;
NeilBrown67918752014-12-15 12:57:01 +11004154 err = mddev_lock(mddev);
4155 if (err)
4156 return err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004157
NeilBrownb3546032009-03-31 14:56:41 +11004158 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004159 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004160 err = -EBUSY;
4161 else if (mddev->ro)
4162 err = -EROFS;
4163 else {
4164 mddev->new_layout = n;
4165 err = mddev->pers->check_reshape(mddev);
4166 if (err)
4167 mddev->new_layout = mddev->layout;
NeilBrown597a7112009-06-18 08:47:42 +10004168 }
NeilBrownb3546032009-03-31 14:56:41 +11004169 } else {
NeilBrown08a02ec2007-05-09 02:35:38 -07004170 mddev->new_layout = n;
NeilBrownb3546032009-03-31 14:56:41 +11004171 if (mddev->reshape_position == MaxSector)
4172 mddev->layout = n;
4173 }
NeilBrown67918752014-12-15 12:57:01 +11004174 mddev_unlock(mddev);
4175 return err ?: len;
NeilBrownd4dbd022006-06-26 00:27:59 -07004176}
4177static struct md_sysfs_entry md_layout =
NeilBrown80ca3a42006-07-10 04:44:18 -07004178__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
NeilBrownd4dbd022006-06-26 00:27:59 -07004179
NeilBrowneae17012005-11-08 21:39:23 -08004180static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004181raid_disks_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08004182{
NeilBrownbb636542005-11-08 21:39:45 -08004183 if (mddev->raid_disks == 0)
4184 return 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004185 if (mddev->reshape_position != MaxSector &&
4186 mddev->delta_disks != 0)
4187 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4188 mddev->raid_disks - mddev->delta_disks);
NeilBrowneae17012005-11-08 21:39:23 -08004189 return sprintf(page, "%d\n", mddev->raid_disks);
4190}
4191
NeilBrownfd01b882011-10-11 16:47:53 +11004192static int update_raid_disks(struct mddev *mddev, int raid_disks);
NeilBrownda943b992006-01-06 00:20:54 -08004193
4194static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004195raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownda943b992006-01-06 00:20:54 -08004196{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004197 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004198 int err;
NeilBrownda943b992006-01-06 00:20:54 -08004199
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004200 err = kstrtouint(buf, 10, &n);
4201 if (err < 0)
4202 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004203
NeilBrown67918752014-12-15 12:57:01 +11004204 err = mddev_lock(mddev);
4205 if (err)
4206 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004207 if (mddev->pers)
NeilBrown67918752014-12-15 12:57:01 +11004208 err = update_raid_disks(mddev, n);
NeilBrown08a02ec2007-05-09 02:35:38 -07004209 else if (mddev->reshape_position != MaxSector) {
NeilBrownc6563a82012-05-21 09:27:00 +10004210 struct md_rdev *rdev;
NeilBrown08a02ec2007-05-09 02:35:38 -07004211 int olddisks = mddev->raid_disks - mddev->delta_disks;
NeilBrownc6563a82012-05-21 09:27:00 +10004212
NeilBrown67918752014-12-15 12:57:01 +11004213 err = -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10004214 rdev_for_each(rdev, mddev) {
4215 if (olddisks < n &&
4216 rdev->data_offset < rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004217 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004218 if (olddisks > n &&
4219 rdev->data_offset > rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004220 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004221 }
NeilBrown67918752014-12-15 12:57:01 +11004222 err = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004223 mddev->delta_disks = n - olddisks;
4224 mddev->raid_disks = n;
NeilBrown2c810cd2012-05-21 09:27:00 +10004225 mddev->reshape_backwards = (mddev->delta_disks < 0);
NeilBrown08a02ec2007-05-09 02:35:38 -07004226 } else
NeilBrownda943b992006-01-06 00:20:54 -08004227 mddev->raid_disks = n;
NeilBrown67918752014-12-15 12:57:01 +11004228out_unlock:
4229 mddev_unlock(mddev);
4230 return err ? err : len;
NeilBrownda943b992006-01-06 00:20:54 -08004231}
4232static struct md_sysfs_entry md_raid_disks =
NeilBrown80ca3a42006-07-10 04:44:18 -07004233__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
NeilBrowneae17012005-11-08 21:39:23 -08004234
NeilBrown24dd4692005-11-08 21:39:26 -08004235static ssize_t
Sebastian Parschauerec164d072020-07-28 12:01:39 +02004236uuid_show(struct mddev *mddev, char *page)
4237{
4238 return sprintf(page, "%pU\n", mddev->uuid);
4239}
4240static struct md_sysfs_entry md_uuid =
4241__ATTR(uuid, S_IRUGO, uuid_show, NULL);
4242
4243static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004244chunk_size_show(struct mddev *mddev, char *page)
NeilBrown3b343802006-01-06 00:20:47 -08004245{
NeilBrown08a02ec2007-05-09 02:35:38 -07004246 if (mddev->reshape_position != MaxSector &&
Andre Noll664e7c42009-06-18 08:45:27 +10004247 mddev->chunk_sectors != mddev->new_chunk_sectors)
4248 return sprintf(page, "%d (%d)\n",
4249 mddev->new_chunk_sectors << 9,
Andre Noll9d8f0362009-06-18 08:45:01 +10004250 mddev->chunk_sectors << 9);
4251 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
NeilBrown3b343802006-01-06 00:20:47 -08004252}
4253
4254static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004255chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown3b343802006-01-06 00:20:47 -08004256{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004257 unsigned long n;
NeilBrown67918752014-12-15 12:57:01 +11004258 int err;
NeilBrown3b343802006-01-06 00:20:47 -08004259
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004260 err = kstrtoul(buf, 10, &n);
4261 if (err < 0)
4262 return err;
NeilBrown3b343802006-01-06 00:20:47 -08004263
NeilBrown67918752014-12-15 12:57:01 +11004264 err = mddev_lock(mddev);
4265 if (err)
4266 return err;
NeilBrownb3546032009-03-31 14:56:41 +11004267 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004268 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004269 err = -EBUSY;
4270 else if (mddev->ro)
4271 err = -EROFS;
4272 else {
4273 mddev->new_chunk_sectors = n >> 9;
4274 err = mddev->pers->check_reshape(mddev);
4275 if (err)
4276 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown597a7112009-06-18 08:47:42 +10004277 }
NeilBrownb3546032009-03-31 14:56:41 +11004278 } else {
Andre Noll664e7c42009-06-18 08:45:27 +10004279 mddev->new_chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004280 if (mddev->reshape_position == MaxSector)
Andre Noll9d8f0362009-06-18 08:45:01 +10004281 mddev->chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004282 }
NeilBrown67918752014-12-15 12:57:01 +11004283 mddev_unlock(mddev);
4284 return err ?: len;
NeilBrown3b343802006-01-06 00:20:47 -08004285}
4286static struct md_sysfs_entry md_chunk_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004287__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
NeilBrown3b343802006-01-06 00:20:47 -08004288
NeilBrowna94213b2006-06-26 00:28:00 -07004289static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004290resync_start_show(struct mddev *mddev, char *page)
NeilBrowna94213b2006-06-26 00:28:00 -07004291{
NeilBrownd1a7c502009-03-31 15:24:32 +11004292 if (mddev->recovery_cp == MaxSector)
4293 return sprintf(page, "none\n");
NeilBrowna94213b2006-06-26 00:28:00 -07004294 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4295}
4296
4297static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004298resync_start_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna94213b2006-06-26 00:28:00 -07004299{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004300 unsigned long long n;
NeilBrown67918752014-12-15 12:57:01 +11004301 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004302
4303 if (cmd_match(buf, "none"))
4304 n = MaxSector;
4305 else {
4306 err = kstrtoull(buf, 10, &n);
4307 if (err < 0)
4308 return err;
4309 if (n != (sector_t)n)
4310 return -EINVAL;
4311 }
NeilBrowna94213b2006-06-26 00:28:00 -07004312
NeilBrown67918752014-12-15 12:57:01 +11004313 err = mddev_lock(mddev);
4314 if (err)
4315 return err;
NeilBrownb0986362011-05-11 15:52:21 +10004316 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrown67918752014-12-15 12:57:01 +11004317 err = -EBUSY;
NeilBrowna94213b2006-06-26 00:28:00 -07004318
NeilBrown67918752014-12-15 12:57:01 +11004319 if (!err) {
4320 mddev->recovery_cp = n;
4321 if (mddev->pers)
Shaohua Li29530792016-12-08 15:48:19 -08004322 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
NeilBrown67918752014-12-15 12:57:01 +11004323 }
4324 mddev_unlock(mddev);
4325 return err ?: len;
NeilBrowna94213b2006-06-26 00:28:00 -07004326}
4327static struct md_sysfs_entry md_resync_start =
NeilBrown750f1992014-09-30 08:53:05 +10004328__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4329 resync_start_show, resync_start_store);
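
/* Illustrative userspace sketch (hypothetical array): writing "none"
 * to resync_start on an inactive or frozen array moves the recovery
 * checkpoint to MaxSector, so the next activation starts clean:
 *
 *	int fd = open("/sys/block/md0/md/resync_start", O_WRONLY);
 *	write(fd, "none", 4);
 *	close(fd);
 */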
NeilBrowna94213b2006-06-26 00:28:00 -07004330
NeilBrown9e653b62006-06-26 00:27:58 -07004331/*
4332 * The array state can be:
4333 *
4334 * clear
4335 * No devices, no size, no level
4336 * Equivalent to STOP_ARRAY ioctl
4337 * inactive
4338 * May have some settings, but array is not active
4339 * all IO results in error
4340 * When written, doesn't tear down array, but just stops it
4341 * suspended (not supported yet)
4342 * All IO requests will block. The array can be reconfigured.
Andre Noll910d8cb2008-03-25 21:00:53 +01004343 * Writing this, if accepted, will block until array is quiescent
NeilBrown9e653b62006-06-26 00:27:58 -07004344 * readonly
4345 * no resync can happen. no superblocks get written.
4346 * write requests fail
4347 * read-auto
4348 * like readonly, but behaves like 'clean' on a write request.
4349 *
4350 * clean - no pending writes, but otherwise active.
4351 * When written to inactive array, starts without resync
4352 * If a write request arrives then
4353 * if metadata is known, mark 'dirty' and switch to 'active'.
4354 * if not known, block and switch to write-pending
4355 * If written to an active array that has pending writes, then fails.
4356 * active
4357 * fully active: IO and resync can be happening.
4358 * When written to inactive array, starts with resync
4359 *
4360 * write-pending
4361 * clean, but writes are blocked waiting for 'active' to be written.
4362 *
4363 * active-idle
4364 * like active, but no writes have been seen for a while (100msec).
4365 *
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004366 * broken
4367 * RAID0/LINEAR-only: same as clean, but array is missing a member.
4368 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
4369 * when a member is gone, so this state will at least alert the
4370 * user that something is wrong.
NeilBrown9e653b62006-06-26 00:27:58 -07004371 */
4372enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004373 write_pending, active_idle, broken, bad_word};
Adrian Bunk05381952006-06-26 00:28:01 -07004374static char *array_states[] = {
NeilBrown9e653b62006-06-26 00:27:58 -07004375 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004376 "write-pending", "active-idle", "broken", NULL };
NeilBrown9e653b62006-06-26 00:27:58 -07004377
4378static int match_word(const char *word, char **list)
4379{
4380 int n;
4381 for (n=0; list[n]; n++)
4382 if (cmd_match(word, list[n]))
4383 break;
4384 return n;
4385}
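
/* Illustrative sketch, not part of the original file: match_word()
 * returns the index of the matching name, which lines up with the
 * array_state enum above; an unrecognised word runs off the end of
 * the list and yields bad_word.
 */
static inline void match_word_example(void)
{
	WARN_ON(match_word("clean", array_states) != clean);
	WARN_ON(match_word("no-such-state", array_states) != bad_word);
}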
4386
4387static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004388array_state_show(struct mddev *mddev, char *page)
NeilBrown9e653b62006-06-26 00:27:58 -07004389{
4390 enum array_state st = inactive;
4391
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004392 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
NeilBrown9e653b62006-06-26 00:27:58 -07004393 switch(mddev->ro) {
4394 case 1:
4395 st = readonly;
4396 break;
4397 case 2:
4398 st = read_auto;
4399 break;
4400 case 0:
NeilBrown55cc39f2017-03-15 14:05:14 +11004401 spin_lock(&mddev->lock);
Shaohua Li29530792016-12-08 15:48:19 -08004402 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrowne6910632008-02-06 01:39:51 -08004403 st = write_pending;
Tomasz Majchrzak16f88942016-10-24 12:47:28 +02004404 else if (mddev->in_sync)
4405 st = clean;
NeilBrown9e653b62006-06-26 00:27:58 -07004406 else if (mddev->safemode)
4407 st = active_idle;
4408 else
4409 st = active;
NeilBrown55cc39f2017-03-15 14:05:14 +11004410 spin_unlock(&mddev->lock);
NeilBrown9e653b62006-06-26 00:27:58 -07004411 }
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004412
4413 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4414 st = broken;
4415 } else {
NeilBrown9e653b62006-06-26 00:27:58 -07004416 if (list_empty(&mddev->disks) &&
4417 mddev->raid_disks == 0 &&
Andre Noll58c0fed2009-03-31 14:33:13 +11004418 mddev->dev_sectors == 0)
NeilBrown9e653b62006-06-26 00:27:58 -07004419 st = clear;
4420 else
4421 st = inactive;
4422 }
4423 return sprintf(page, "%s\n", array_states[st]);
4424}
4425
NeilBrownf72ffdd2014-09-30 14:23:59 +10004426static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4427static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
NeilBrownfd01b882011-10-11 16:47:53 +11004428static int restart_array(struct mddev *mddev);
NeilBrown9e653b62006-06-26 00:27:58 -07004429
4430static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004431array_state_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown9e653b62006-06-26 00:27:58 -07004432{
NeilBrown6497709b2017-03-15 14:05:14 +11004433 int err = 0;
NeilBrown9e653b62006-06-26 00:27:58 -07004434 enum array_state st = match_word(buf, array_states);
NeilBrown67918752014-12-15 12:57:01 +11004435
4436 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4437 /* don't take reconfig_mutex when toggling between
4438 * clean and active
4439 */
4440 spin_lock(&mddev->lock);
4441 if (st == active) {
4442 restart_array(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004443 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
Tomasz Majchrzak91a6c4a2016-10-25 17:07:08 +02004444 md_wakeup_thread(mddev->thread);
NeilBrown67918752014-12-15 12:57:01 +11004445 wake_up(&mddev->sb_wait);
NeilBrown67918752014-12-15 12:57:01 +11004446 } else /* st == clean */ {
4447 restart_array(mddev);
NeilBrown6497709b2017-03-15 14:05:14 +11004448 if (!set_in_sync(mddev))
NeilBrown67918752014-12-15 12:57:01 +11004449 err = -EBUSY;
4450 }
Tomasz Majchrzak573275b2016-06-30 10:47:09 +02004451 if (!err)
4452 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown67918752014-12-15 12:57:01 +11004453 spin_unlock(&mddev->lock);
NeilBrownc008f1d2015-06-12 19:46:44 +10004454 return err ?: len;
NeilBrown67918752014-12-15 12:57:01 +11004455 }
4456 err = mddev_lock(mddev);
4457 if (err)
4458 return err;
4459 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004460 switch(st) {
4461 case bad_word:
4462 break;
4463 case clear:
4464 /* stopping an active array */
NeilBrowna05b7ea2012-07-19 15:59:18 +10004465 err = do_md_stop(mddev, 0, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004466 break;
4467 case inactive:
4468 /* stopping an active array */
NeilBrown90cf1952012-07-31 10:04:55 +10004469 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004470 err = do_md_stop(mddev, 2, NULL);
NeilBrown90cf1952012-07-31 10:04:55 +10004471 else
NeilBrowne6910632008-02-06 01:39:51 -08004472 err = 0; /* already inactive */
NeilBrown9e653b62006-06-26 00:27:58 -07004473 break;
4474 case suspended:
4475 break; /* not supported yet */
4476 case readonly:
4477 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004478 err = md_set_readonly(mddev, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004479 else {
4480 mddev->ro = 1;
NeilBrown648b6292008-04-30 00:52:30 -07004481 set_disk_ro(mddev->gendisk, 1);
NeilBrown9e653b62006-06-26 00:27:58 -07004482 err = do_md_run(mddev);
4483 }
4484 break;
4485 case read_auto:
NeilBrown9e653b62006-06-26 00:27:58 -07004486 if (mddev->pers) {
NeilBrown80268ee2008-10-13 11:55:12 +11004487 if (mddev->ro == 0)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004488 err = md_set_readonly(mddev, NULL);
NeilBrown80268ee2008-10-13 11:55:12 +11004489 else if (mddev->ro == 1)
NeilBrown648b6292008-04-30 00:52:30 -07004490 err = restart_array(mddev);
4491 if (err == 0) {
4492 mddev->ro = 2;
4493 set_disk_ro(mddev->gendisk, 0);
4494 }
NeilBrown9e653b62006-06-26 00:27:58 -07004495 } else {
4496 mddev->ro = 2;
4497 err = do_md_run(mddev);
4498 }
4499 break;
4500 case clean:
4501 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004502 err = restart_array(mddev);
4503 if (err)
4504 break;
NeilBrown85572d72014-12-15 12:56:56 +11004505 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11004506 if (!set_in_sync(mddev))
NeilBrowne6910632008-02-06 01:39:51 -08004507 err = -EBUSY;
NeilBrown85572d72014-12-15 12:56:56 +11004508 spin_unlock(&mddev->lock);
NeilBrown5bf29592009-05-07 12:50:57 +10004509 } else
4510 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004511 break;
4512 case active:
4513 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004514 err = restart_array(mddev);
4515 if (err)
4516 break;
Shaohua Li29530792016-12-08 15:48:19 -08004517 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown9e653b62006-06-26 00:27:58 -07004518 wake_up(&mddev->sb_wait);
4519 err = 0;
4520 } else {
4521 mddev->ro = 0;
NeilBrown648b6292008-04-30 00:52:30 -07004522 set_disk_ro(mddev->gendisk, 0);
NeilBrown9e653b62006-06-26 00:27:58 -07004523 err = do_md_run(mddev);
4524 }
4525 break;
4526 case write_pending:
4527 case active_idle:
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004528 case broken:
NeilBrown9e653b62006-06-26 00:27:58 -07004529 /* these cannot be set */
4530 break;
4531 }
NeilBrown67918752014-12-15 12:57:01 +11004532
4533 if (!err) {
NeilBrown1d23f172011-12-08 15:49:12 +11004534 if (mddev->hold_active == UNTIL_IOCTL)
4535 mddev->hold_active = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10004536 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10004537 }
NeilBrown67918752014-12-15 12:57:01 +11004538 mddev_unlock(mddev);
4539 return err ?: len;
NeilBrown9e653b62006-06-26 00:27:58 -07004540}
NeilBrown80ca3a42006-07-10 04:44:18 -07004541static struct md_sysfs_entry md_array_state =
NeilBrown750f1992014-09-30 08:53:05 +10004542__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
NeilBrown9e653b62006-06-26 00:27:58 -07004543
NeilBrown6d7ff7382006-01-06 00:21:16 -08004544static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004545max_corrected_read_errors_show(struct mddev *mddev, char *page) {
Robert Becker1e509152009-12-14 12:49:58 +11004546 return sprintf(page, "%d\n",
4547 atomic_read(&mddev->max_corr_read_errors));
4548}
4549
4550static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004551max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
Robert Becker1e509152009-12-14 12:49:58 +11004552{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004553 unsigned int n;
4554 int rv;
Robert Becker1e509152009-12-14 12:49:58 +11004555
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004556 rv = kstrtouint(buf, 10, &n);
4557 if (rv < 0)
4558 return rv;
4559 atomic_set(&mddev->max_corr_read_errors, n);
4560 return len;
Robert Becker1e509152009-12-14 12:49:58 +11004561}
4562
4563static struct md_sysfs_entry max_corr_read_errors =
4564__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4565 max_corrected_read_errors_store);
4566
4567static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004568null_show(struct mddev *mddev, char *page)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004569{
4570 return -EINVAL;
4571}
4572
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004573/* need to ensure rdev_delayed_delete() has completed */
4574static void flush_rdev_wq(struct mddev *mddev)
4575{
4576 struct md_rdev *rdev;
4577
4578 rcu_read_lock();
4579 rdev_for_each_rcu(rdev, mddev)
4580 if (work_pending(&rdev->del_work)) {
4581 flush_workqueue(md_rdev_misc_wq);
4582 break;
4583 }
4584 rcu_read_unlock();
4585}
4586
NeilBrown6d7ff7382006-01-06 00:21:16 -08004587static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004588new_dev_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004589{
4590 /* buf must be "%d:%d" with an optional trailing newline, giving major and minor numbers */
4591 /* The new device is added to the array.
4592 * If the array has a persistent superblock, we read the
4593 * superblock to initialise info and check validity.
4594 * Otherwise, the only checking done is that in bind_rdev_to_array,
4595 * which mainly checks size.
4596 */
4597 char *e;
4598 int major = simple_strtoul(buf, &e, 10);
4599 int minor;
4600 dev_t dev;
NeilBrown3cb03002011-10-11 16:45:26 +11004601 struct md_rdev *rdev;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004602 int err;
4603
4604 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4605 return -EINVAL;
4606 minor = simple_strtoul(e+1, &e, 10);
4607 if (*e && *e != '\n')
4608 return -EINVAL;
4609 dev = MKDEV(major, minor);
4610 if (major != MAJOR(dev) ||
4611 minor != MINOR(dev))
4612 return -EOVERFLOW;
4613
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004614 flush_rdev_wq(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004615 err = mddev_lock(mddev);
4616 if (err)
4617 return err;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004618 if (mddev->persistent) {
4619 rdev = md_import_device(dev, mddev->major_version,
4620 mddev->minor_version);
4621 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11004622 struct md_rdev *rdev0
4623 = list_entry(mddev->disks.next,
4624 struct md_rdev, same_set);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004625 err = super_types[mddev->major_version]
4626 .load_super(rdev, rdev0, mddev->minor_version);
4627 if (err < 0)
4628 goto out;
4629 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08004630 } else if (mddev->external)
4631 rdev = md_import_device(dev, -2, -1);
4632 else
NeilBrown6d7ff7382006-01-06 00:21:16 -08004633 rdev = md_import_device(dev, -1, -1);
4634
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004635 if (IS_ERR(rdev)) {
4636 mddev_unlock(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004637 return PTR_ERR(rdev);
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004638 }
NeilBrown6d7ff7382006-01-06 00:21:16 -08004639 err = bind_rdev_to_array(rdev, mddev);
4640 out:
4641 if (err)
4642 export_rdev(rdev);
NeilBrown67918752014-12-15 12:57:01 +11004643 mddev_unlock(mddev);
Alexey Obitotskiy5492c462017-07-28 15:49:25 +02004644 if (!err)
4645 md_new_event(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004646 return err ? err : len;
4647}
4648
4649static struct md_sysfs_entry md_new_device =
NeilBrown80ca3a42006-07-10 04:44:18 -07004650__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
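
/* Illustrative userspace sketch (hypothetical device numbers): a
 * device is added by writing "major:minor", e.g. 8:16 for /dev/sdb:
 *
 *	int fd = open("/sys/block/md0/md/new_dev", O_WRONLY);
 *	write(fd, "8:16", 4);
 *	close(fd);
 */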

static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;
	int err;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end)
			break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end)
				break;
		}
		if (*end && !isspace(*end))
			break;
		md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = skip_spaces(end);
	}
	md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	mddev_unlock(mddev);
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
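
/*
 * Illustrative usage (the chunk numbers are examples only):
 *   echo "42 100-200" > /sys/block/md0/md/bitmap_set_bits
 * marks bitmap chunk 42 and chunks 100 through 200 dirty, so the
 * corresponding regions are rewritten on the next resync pass.
 */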

static ssize_t
size_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize.
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		if (err == 0)
			md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	mddev_unlock(mddev);
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
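
/*
 * component_size is read and written in KiB; internally the code keeps
 * 512-byte sectors, hence the "/ 2" in size_show() above.  Illustrative
 * usage (the size is an example only):
 *   echo 1048576 > /sys/block/md0/md/component_size   # 1 GiB per device
 */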

/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats.
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	int err;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EBUSY;
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		goto out_unlock;

	err = 0;
	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	major = simple_strtoul(buf, &e, 10);
	err = -EINVAL;
	if (e == buf || *e != '.')
		goto out_unlock;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		goto out_unlock;
	err = -ENOENT;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		goto out_unlock;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	err = 0;
out_unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
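
/*
 * Illustrative usage, only accepted while no devices are attached
 * (the version number and the name after "external:" are examples only):
 *   echo 1.2 > /sys/block/md0/md/metadata_version
 *   echo external:imsm > /sys/block/md0/md/metadata_version
 *   echo none > /sys/block/md0/md/metadata_version
 */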

static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	unsigned long recovery = mddev->recovery;

	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
		 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
			type = "recover";
		else if (mddev->reshape_position != MaxSector)
			type = "reshape";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (cmd_match(page, "frozen"))
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		else
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    mddev_lock(mddev) == 0) {
			if (work_pending(&mddev->del_work))
				flush_workqueue(md_misc_wq);
			if (mddev->sync_thread) {
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_reap_sync_thread(mddev);
			}
			mddev_unlock(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev_lock(mddev);
		if (!err) {
			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
				err = -EBUSY;
			else {
				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
				err = mddev->pers->start_reshape(mddev);
			}
			mddev_unlock(mddev);
		}
		if (err)
			return err;
		sysfs_notify_dirent_safe(mddev->sysfs_degraded);
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}

static struct md_sysfs_entry md_scan_mode =
__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
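
/*
 * Illustrative usage:
 *   echo check  > /sys/block/md0/md/sync_action   # start a scrub pass
 *   echo repair > /sys/block/md0/md/sync_action   # scrub and rewrite
 *   echo idle   > /sys/block/md0/md/sync_action   # interrupt it again
 * The accepted words are exactly those matched in action_store() above:
 * idle, frozen, resync, recover, reshape, check and repair.
 */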

static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n", mddev->last_sync_action);
}

static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)
		       atomic64_read(&mddev->resync_mismatches));
}

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);

static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local" : "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int min;
	int rv;

	if (strncmp(buf, "system", 6) == 0) {
		min = 0;
	} else {
		rv = kstrtouint(buf, 10, &min);
		if (rv < 0)
			return rv;
		if (min == 0)
			return -EINVAL;
	}
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local" : "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int max;
	int rv;

	if (strncmp(buf, "system", 6) == 0) {
		max = 0;
	} else {
		rv = kstrtouint(buf, 10, &max);
		if (rv < 0)
			return rv;
		if (max == 0)
			return -EINVAL;
	}
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
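
/*
 * Both speed limits are per-device rates in KiB/sec.  Illustrative
 * usage (the number is an example only):
 *   echo 50000  > /sys/block/md0/md/sync_speed_min
 *   echo system > /sys/block/md0/md/sync_speed_min
 * Writing "system" reverts to the system-wide default, which is what
 * speed_min()/speed_max() report when no local value is set.
 */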
4984
Iustin Popd7f3d292007-10-16 23:30:54 -07004985static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004986degraded_show(struct mddev *mddev, char *page)
Iustin Popd7f3d292007-10-16 23:30:54 -07004987{
4988 return sprintf(page, "%d\n", mddev->degraded);
4989}
4990static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
NeilBrown88202a02006-01-06 00:21:36 -08004991
4992static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004993sync_force_parallel_show(struct mddev *mddev, char *page)
Bernd Schubert90b08712008-05-23 13:04:38 -07004994{
4995 return sprintf(page, "%d\n", mddev->parallel_resync);
4996}
4997
4998static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004999sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
Bernd Schubert90b08712008-05-23 13:04:38 -07005000{
5001 long n;
5002
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005003 if (kstrtol(buf, 10, &n))
Bernd Schubert90b08712008-05-23 13:04:38 -07005004 return -EINVAL;
5005
5006 if (n != 0 && n != 1)
5007 return -EINVAL;
5008
5009 mddev->parallel_resync = n;
5010
5011 if (mddev->sync_thread)
5012 wake_up(&resync_wait);
5013
5014 return len;
5015}
5016
5017/* force parallel resync, even with shared block devices */
5018static struct md_sysfs_entry md_sync_force_parallel =
5019__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5020 sync_force_parallel_show, sync_force_parallel_store);
5021
5022static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005023sync_speed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005024{
5025 unsigned long resync, dt, db;
NeilBrownd1a7c502009-03-31 15:24:32 +11005026 if (mddev->curr_resync == 0)
5027 return sprintf(page, "none\n");
Andre Noll9687a602008-03-25 22:24:09 +01005028 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5029 dt = (jiffies - mddev->resync_mark) / HZ;
NeilBrown88202a02006-01-06 00:21:36 -08005030 if (!dt) dt++;
Andre Noll9687a602008-03-25 22:24:09 +01005031 db = resync - mddev->resync_mark_cnt;
5032 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
NeilBrown88202a02006-01-06 00:21:36 -08005033}
5034
NeilBrown80ca3a42006-07-10 04:44:18 -07005035static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (mddev->curr_resync == 1 ||
	    mddev->curr_resync == 2)
		return sprintf(page, "delayed\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed =
	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);

static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	int err;

	if (kstrtoull(buf, 10, &min))
		return -EINVAL;

	spin_lock(&mddev->lock);
	err = -EINVAL;
	if (min > mddev->resync_max)
		goto out_unlock;

	err = -EBUSY;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		goto out_unlock;

	/* Round down to a multiple of 4K (8 sectors of 512 bytes) for safety */
	mddev->resync_min = round_down(min, 8);
	err = 0;

out_unlock:
	spin_unlock(&mddev->lock);
	return err ?: len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err;

	spin_lock(&mddev->lock);
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		int chunk;

		err = -EINVAL;
		if (kstrtoull(buf, 10, &max))
			goto out_unlock;
		if (max < mddev->resync_min)
			goto out_unlock;

		err = -EBUSY;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			goto out_unlock;

		/* Must be a multiple of chunk_size */
		chunk = mddev->chunk_sectors;
		if (chunk) {
			sector_t temp = max;

			err = -EINVAL;
			if (sector_div(temp, chunk))
				goto out_unlock;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	err = 0;
out_unlock:
	spin_unlock(&mddev->lock);
	return err ?: len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
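
/*
 * sync_min/sync_max bound the region, in 512-byte sectors, that a
 * resync or check will cover.  Illustrative usage (the offsets are
 * examples only):
 *   echo 0        > /sys/block/md0/md/sync_min
 *   echo 20971520 > /sys/block/md0/md/sync_max   # stop after ~10 GiB
 *   echo max      > /sys/block/md0/md/sync_max
 */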

static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EINVAL;
	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		goto unlock;
	mddev_suspend(mddev);
	mddev->suspend_lo = new;
	mddev_resume(mddev);

	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EINVAL;
	if (mddev->pers == NULL)
		goto unlock;

	mddev_suspend(mddev);
	mddev->suspend_hi = new;
	mddev_resume(mddev);

	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
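
/*
 * suspend_lo and suspend_hi delimit a region of the array, in sectors,
 * that the personality should treat as suspended; external metadata
 * managers use this, e.g. around reshape.  The array itself is
 * quiesced (mddev_suspend/mddev_resume) while either value changes.
 * Illustrative usage (the sector values are examples only):
 *   echo 0       > /sys/block/md0/md/suspend_lo
 *   echo 1048576 > /sys/block/md0/md/suspend_hi
 */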

static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
	struct md_rdev *rdev;
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;
	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EBUSY;
	if (mddev->pers)
		goto unlock;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);
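
/*
 * Illustrative usage, only accepted while the array is inactive (the
 * value is an example only); as the store routine above shows, writing
 * also resets delta_disks, layout, chunk size and per-device data
 * offsets to the current geometry:
 *   echo 0 > /sys/block/md0/md/reshape_position
 */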

static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n",
		       mddev->reshape_backwards ? "backwards" : "forwards");
}

static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
	int backwards = 0;
	int err;

	if (cmd_match(buf, "forwards"))
		backwards = 0;
	else if (cmd_match(buf, "backwards"))
		backwards = 1;
	else
		return -EINVAL;
	if (mddev->reshape_backwards == backwards)
		return len;

	err = mddev_lock(mddev);
	if (err)
		return err;
	/* check if we are allowed to change */
	if (mddev->delta_disks)
		err = -EBUSY;
	else if (mddev->persistent &&
		 mddev->major_version == 0)
		err = -EINVAL;
	else
		mddev->reshape_backwards = backwards;
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
       reshape_direction_store);
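
/*
 * Illustrative usage; the write is refused once delta_disks is set,
 * and 0.90 metadata cannot record a backwards reshape:
 *   echo backwards > /sys/block/md0/md/reshape_direction
 */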

static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	sector_t sectors;
	int err;

	err = mddev_lock(mddev);
	if (err)
		return err;

	/* clustered raid doesn't support changing array_sectors */
	if (mddev_is_clustered(mddev)) {
		mddev_unlock(mddev);
		return -EINVAL;
	}

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			err = -EINVAL;
		else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			err = -E2BIG;
		else
			mddev->external_size = 1;
	}

	if (!err) {
		mddev->array_sectors = sectors;
		if (mddev->pers)
			set_capacity_and_notify(mddev->gendisk,
						mddev->array_sectors);
	}
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);
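
/*
 * array_size is written in KiB, or as "default" to let the personality
 * compute the size.  Illustrative usage (the size is an example only):
 *   echo 524288  > /sys/block/md0/md/array_size   # clamp to 512 MiB
 *   echo default > /sys/block/md0/md/array_size
 */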

static ssize_t
consistency_policy_show(struct mddev *mddev, char *page)
{
	int ret;

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		ret = sprintf(page, "journal\n");
	} else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
		ret = sprintf(page, "ppl\n");
	} else if (mddev->bitmap) {
		ret = sprintf(page, "bitmap\n");
	} else if (mddev->pers) {
		if (mddev->pers->sync_request)
			ret = sprintf(page, "resync\n");
		else
			ret = sprintf(page, "none\n");
	} else {
		ret = sprintf(page, "unknown\n");
	}

	return ret;
}

static ssize_t
consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = 0;

	if (mddev->pers) {
		if (mddev->pers->change_consistency_policy)
			err = mddev->pers->change_consistency_policy(mddev, buf);
		else
			err = -EBUSY;
	} else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
		set_bit(MD_HAS_PPL, &mddev->flags);
	} else {
		err = -EINVAL;
	}

	return err ? err : len;
}

static struct md_sysfs_entry md_consistency_policy =
__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
       consistency_policy_store);
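
/*
 * Illustrative usage: for an active array the write is delegated to
 * the personality's change_consistency_policy method, so only the
 * transitions it supports will succeed; for an inactive,
 * externally-managed array only "ppl" is accepted:
 *   echo ppl > /sys/block/md0/md/consistency_policy
 */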

static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->fail_last_dev);
}

/*
 * Setting fail_last_dev to true allows the last device to be forcibly
 * removed from RAID1/RAID10.
 */
static ssize_t
fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	int ret;
	bool value;

	ret = kstrtobool(buf, &value);
	if (ret)
		return ret;

	if (value != mddev->fail_last_dev)
		mddev->fail_last_dev = value;

	return len;
}
static struct md_sysfs_entry md_fail_last_dev =
__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
       fail_last_dev_store);
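
/*
 * Illustrative usage:
 *   echo 1 > /sys/block/md0/md/fail_last_dev
 * The value goes through kstrtobool(), so y/n/on/off style spellings
 * should also be accepted.
 */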

static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
{
	if (mddev->pers == NULL || (mddev->pers->level != 1))
		return sprintf(page, "n/a\n");
	else
		return sprintf(page, "%d\n", mddev->serialize_policy);
}

/*
 * Setting serialize_policy to true enforces that write IO is not
 * reordered for raid1.
 */
static ssize_t
serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err;
	bool value;

	err = kstrtobool(buf, &value);
	if (err)
		return err;

	if (value == mddev->serialize_policy)
		return len;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers == NULL || (mddev->pers->level != 1)) {
		pr_err("md: serialize_policy is only effective for raid1\n");
		err = -EINVAL;
		goto unlock;
	}

	mddev_suspend(mddev);
	if (value)
		mddev_create_serial_pool(mddev, NULL, true);
	else
		mddev_destroy_serial_pool(mddev, NULL, true);
	mddev->serialize_policy = value;
	mddev_resume(mddev);
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_serialize_policy =
__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
       serialize_policy_store);
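
/*
 * Illustrative usage on a raid1 array:
 *   echo 1 > /sys/block/md0/md/serialize_policy
 * As the store routine above shows, the array is suspended around the
 * switch so the serial pool can be created or torn down without racing
 * in-flight writes.
 */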

static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_uuid.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_reshape_direction.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	&md_consistency_policy.attr,
	&md_fail_last_dev.attr,
	&md_serialize_policy.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_last_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};

static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = entry->show(mddev, page);
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	rv = entry->store(mddev, page, length);
	mddev_put(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);
	if (mddev->sysfs_level)
		sysfs_put(mddev->sysfs_level);

	if (mddev->gendisk)
		del_gendisk(mddev->gendisk);
	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);
	if (mddev->gendisk)
		put_disk(mddev->gendisk);
	percpu_ref_exit(&mddev->writes_pending);

	bioset_exit(&mddev->bio_set);
	bioset_exit(&mddev->sync_set);
	mempool_exit(&mddev->md_io_pool);
	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show = md_attr_show,
	.store = md_attr_store,
};
static struct kobj_type md_ktype = {
	.release = md_free,
	.sysfs_ops = &md_sysfs_ops,
	.default_attrs = md_default_attrs,
};

int mdp_major = 0;

static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

static void no_op(struct percpu_ref *r) {}

int mddev_init_writes_pending(struct mddev *mddev)
{
	if (mddev->writes_pending.percpu_count_ptr)
		return 0;
	if (percpu_ref_init(&mddev->writes_pending, no_op,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
		return -ENOMEM;
	/* We want to start with the refcount at zero */
	percpu_ref_put(&mddev->writes_pending);
	return 0;
}
EXPORT_SYMBOL_GPL(mddev_init_writes_pending);

static int md_alloc(dev_t dev, char *name)
{
	/*
	 * If dev is zero, name is the name of a device to allocate with
	 * an arbitrary minor number.  It will be "md_???"
	 * If dev is non-zero it must be a device number with a MAJOR of
	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
	 * the device is being created by opening a node in /dev.
	 * If "name" is not NULL, the device is being created by
	 * writing to /sys/module/md_mod/parameters/new_array.
	 */
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name && !dev) {
		/* Need to ensure that 'name' is not a duplicate. */
		struct mddev *mddev2;

		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}
	if (name && dev)
		/*
		 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
		 */
		mddev->hold_active = UNTIL_STOP;

	error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
					  sizeof(struct md_io));
	if (error)
		goto abort;

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!mddev->queue)
		goto abort;

	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_write_cache(mddev->queue, true, true);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->events |= DISK_EVENT_MEDIA_CHANGE;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		pr_debug("md: cannot register %s/md - name in use\n",
			 disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		pr_debug("pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
 abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
		mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
	}
	mddev_put(mddev);
	return error;
}

static void md_probe(dev_t dev)
{
	if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
		return;
	if (create_on_open)
		md_alloc(dev, NULL);
}

static int add_named_array(const char *val, const struct kernel_param *kp)
{
	/*
	 * val must be "md_*" or "mdNNN".
	 * For "md_*" we allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 * For "mdNNN" we allocate an array with the minor number NNN
	 * which must not already be in use.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];
	unsigned long devnum;

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) == 0)
		return md_alloc(0, buf);
	if (strncmp(buf, "md", 2) == 0 &&
	    isdigit(buf[2]) &&
	    kstrtoul(buf+2, 10, &devnum) == 0 &&
	    devnum <= MINORMASK)
		return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);

	return -EINVAL;
}
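
/*
 * Illustrative usage of the module parameter handled above (the array
 * names are examples only):
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 *   echo md127   > /sys/module/md_mod/parameters/new_array
 */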

static void md_safemode_timeout(struct timer_list *t)
{
	struct mddev *mddev = from_timer(mddev, t, safemode_timer);

	mddev->safemode = 1;
	if (mddev->external)
		sysfs_notify_dirent_safe(mddev->sysfs_state);

	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;

int md_run(struct mddev *mddev)
{
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		err = analyze_sbs(mddev);
		if (err)
			return -EINVAL;
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	mddev->has_superblocks = false;
	rdev_for_each(rdev, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);
		if (mddev->ro != 1 &&
		    (bdev_read_only(rdev->bdev) ||
		     bdev_read_only(rdev->meta_bdev))) {
			mddev->ro = 1;
			if (mddev->gendisk)
				set_disk_ro(mddev->gendisk, 1);
		}

		if (rdev->sb_page)
			mddev->has_superblocks = true;

		/* Perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata;
		 * internal bitmap issues have been handled elsewhere.
		 */
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +11005873 if (rdev->meta_bdev) {
5874 /* Nothing to check */;
5875 } else if (rdev->data_offset < rdev->sb_start) {
Andre Noll58c0fed2009-03-31 14:33:13 +11005876 if (mddev->dev_sectors &&
5877 rdev->data_offset + mddev->dev_sectors
Andre Noll0f420352008-07-11 22:02:23 +10005878 > rdev->sb_start) {
NeilBrown9d487392016-11-02 14:16:49 +11005879 pr_warn("md: %s: data overlaps metadata\n",
5880 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005881 return -EINVAL;
5882 }
5883 } else {
Andre Noll0f420352008-07-11 22:02:23 +10005884 if (rdev->sb_start + rdev->sb_size/512
NeilBrownf0d76d72007-07-17 04:06:12 -07005885 > rdev->data_offset) {
NeilBrown9d487392016-11-02 14:16:49 +11005886 pr_warn("md: %s: metadata overlaps data\n",
5887 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005888 return -EINVAL;
5889 }
5890 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005891 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005892 }
5893
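	/*
	 * Two private biosets: ->bio_set backs bios that md allocates or
	 * clones for regular array I/O, while ->sync_set backs superblock
	 * writes (md_super_write()), so metadata I/O does not have to
	 * share a mempool with data I/O.
	 */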
Kent Overstreetafeee512018-05-20 18:25:52 -04005894 if (!bioset_initialized(&mddev->bio_set)) {
5895 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5896 if (err)
5897 return err;
Ming Lei10273172017-02-14 23:29:00 +08005898 }
Kent Overstreetafeee512018-05-20 18:25:52 -04005899 if (!bioset_initialized(&mddev->sync_set)) {
5900 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5901 if (err)
Kent Overstreet28dec872018-06-07 20:52:54 -04005902 goto abort;
NeilBrown5a850712017-06-21 09:12:21 +10005903 }
NeilBrowna167f662010-10-26 18:31:13 +11005904
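	/*
	 * Look up the personality (raid0, raid1, ...) under pers_lock and
	 * pin its module so it cannot be unloaded while the array runs.
	 */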
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905 spin_lock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005906 pers = find_pers(mddev->level, mddev->clevel);
NeilBrown2604b702006-01-06 00:20:36 -08005907 if (!pers || !try_module_get(pers->owner)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005908 spin_unlock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005909 if (mddev->level != LEVEL_NONE)
NeilBrown9d487392016-11-02 14:16:49 +11005910 pr_warn("md: personality for level %d is not loaded!\n",
5911 mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08005912 else
NeilBrown9d487392016-11-02 14:16:49 +11005913 pr_warn("md: personality for level %s is not loaded!\n",
5914 mddev->clevel);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005915 err = -EINVAL;
5916 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005918 spin_unlock(&pers_lock);
NeilBrown34817e82009-03-31 14:39:38 +11005919 if (mddev->level != pers->level) {
5920 mddev->level = pers->level;
5921 mddev->new_level = pers->level;
5922 }
NeilBrownd9d166c2006-01-06 00:20:51 -08005923 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005924
NeilBrownf6705572006-03-27 01:18:11 -08005925 if (mddev->reshape_position != MaxSector &&
NeilBrown63c70c42006-03-27 01:18:13 -08005926 pers->start_reshape == NULL) {
NeilBrownf6705572006-03-27 01:18:11 -08005927 /* This personality cannot handle reshaping... */
NeilBrownf6705572006-03-27 01:18:11 -08005928 module_put(pers->owner);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005929 err = -EINVAL;
5930 goto abort;
NeilBrownf6705572006-03-27 01:18:11 -08005931 }
5932
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005933 if (pers->sync_request) {
5934 /* Warn if this is a potentially silly
5935 * configuration.
5936 */
5937 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11005938 struct md_rdev *rdev2;
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005939 int warned = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005940
NeilBrowndafb20f2012-03-19 12:46:39 +11005941 rdev_for_each(rdev, mddev)
5942 rdev_for_each(rdev2, mddev) {
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005943 if (rdev < rdev2 &&
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02005944 rdev->bdev->bd_disk ==
5945 rdev2->bdev->bd_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11005946 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5947 mdname(mddev),
5948 bdevname(rdev->bdev,b),
5949 bdevname(rdev2->bdev,b2));
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005950 warned = 1;
5951 }
5952 }
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005953
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005954 if (warned)
NeilBrown9d487392016-11-02 14:16:49 +11005955 pr_warn("True protection against single-disk failure might be compromised.\n");
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005956 }
5957
NeilBrown657390d2005-08-26 18:34:16 -07005958 mddev->recovery = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11005959 /* may be overridden by personality */
5960 mddev->resync_max_sectors = mddev->dev_sectors;
5961
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005962 mddev->ok_start_degraded = start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005963
NeilBrown0f9552b52009-12-30 12:08:50 +11005964 if (start_readonly && mddev->ro == 0)
NeilBrownf91de922005-11-08 21:39:36 -08005965 mddev->ro = 2; /* read-only, but switch on first write */
5966
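	/*
	 * Hand over to the personality: ->run() validates the members and
	 * builds its private data; ->size() below is used to sanity-check
	 * the resulting ->array_sectors.
	 */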
NeilBrown36d091f2014-12-15 12:56:58 +11005967 err = pers->run(mddev);
Andre Noll13e53df2008-03-26 00:07:03 +01005968 if (err)
NeilBrown9d487392016-11-02 14:16:49 +11005969 pr_warn("md: pers->run() failed ...\n");
NeilBrown36d091f2014-12-15 12:56:58 +11005970 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
NeilBrown9d487392016-11-02 14:16:49 +11005971 WARN_ONCE(!mddev->external_size,
5972 "%s: default size too small, but 'external_size' not in effect?\n",
5973 __func__);
5974 pr_warn("md: invalid array_size %llu > default size %llu\n",
5975 (unsigned long long)mddev->array_sectors / 2,
5976 (unsigned long long)pers->size(mddev, 0, 0) / 2);
Dan Williamsb522adc2009-03-31 15:00:31 +11005977 err = -EINVAL;
Dan Williamsb522adc2009-03-31 15:00:31 +11005978 }
NeilBrown36d091f2014-12-15 12:56:58 +11005979 if (err == 0 && pers->sync_request &&
NeilBrownef99bf42012-05-22 13:55:08 +10005980 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005981 struct bitmap *bitmap;
5982
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07005983 bitmap = md_bitmap_create(mddev, -1);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005984 if (IS_ERR(bitmap)) {
5985 err = PTR_ERR(bitmap);
NeilBrown9d487392016-11-02 14:16:49 +11005986 pr_warn("%s: failed to create bitmap (%d)\n",
5987 mdname(mddev), err);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005988 } else
5989 mddev->bitmap = bitmap;
5990
NeilBrownb15c2e52006-01-06 00:20:16 -08005991 }
Guoqing Jiangd4945492019-06-14 17:10:39 +08005992 if (err)
5993 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005994
5995 if (mddev->bitmap_info.max_write_behind > 0) {
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005996 bool create_pool = false;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005997
5998 rdev_for_each(rdev, mddev) {
5999 if (test_bit(WriteMostly, &rdev->flags) &&
Guoqing Jiang404659c2019-12-23 10:48:53 +01006000 rdev_init_serial(rdev))
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006001 create_pool = true;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006002 }
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006003 if (create_pool && mddev->serial_info_pool == NULL) {
Guoqing Jiang404659c2019-12-23 10:48:53 +01006004 mddev->serial_info_pool =
6005 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6006 sizeof(struct serial_info));
6007 if (!mddev->serial_info_pool) {
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006008 err = -ENOMEM;
Guoqing Jiangd4945492019-06-14 17:10:39 +08006009 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006010 }
6011 }
6012 }
6013
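	/*
	 * The array queue is advertised as non-rotational only when every
	 * active member device is non-rotational and the array is not
	 * degraded.
	 */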
NeilBrown5c675f82014-12-15 12:56:56 +11006014 if (mddev->queue) {
Shaohua Libb086a82016-09-30 09:45:40 -07006015 bool nonrot = true;
6016
6017 rdev_for_each(rdev, mddev) {
6018 if (rdev->raid_disk >= 0 &&
6019 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6020 nonrot = false;
6021 break;
6022 }
6023 }
6024 if (mddev->degraded)
6025 nonrot = false;
6026 if (nonrot)
Bart Van Assche8b904b52018-03-07 17:10:10 -08006027 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
Shaohua Libb086a82016-09-30 09:45:40 -07006028 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08006029 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
NeilBrown5c675f82014-12-15 12:56:56 +11006030 }
NeilBrown36d091f2014-12-15 12:56:58 +11006031 if (pers->sync_request) {
NeilBrown00bcb4a2010-06-01 19:37:23 +10006032 if (mddev->kobj.sd &&
6033 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11006034 pr_warn("md: cannot register extra attributes for %s\n",
6035 mdname(mddev));
NeilBrown00bcb4a2010-06-01 19:37:23 +10006036 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07006037 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6038 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrown5e55e2f2007-03-26 21:32:14 -08006039 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
NeilBrownfd9d49c2005-11-08 21:39:42 -08006040 mddev->ro = 0;
6041
Robert Becker1e509152009-12-14 12:49:58 +11006042 atomic_set(&mddev->max_corr_read_errors,
6043 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006044 mddev->safemode = 0;
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006045 if (mddev_is_clustered(mddev))
6046 mddev->safemode_delay = 0;
6047 else
Zhao Heming7c9d5c52020-07-21 02:08:52 +08006048 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006049 mddev->in_sync = 1;
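	/* order the ->in_sync store before ->pers is published below */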
NeilBrown0ca69882011-01-14 09:14:33 +11006050 smp_wmb();
NeilBrown36d091f2014-12-15 12:56:58 +11006051 spin_lock(&mddev->lock);
6052 mddev->pers = pers;
NeilBrown36d091f2014-12-15 12:56:58 +11006053 spin_unlock(&mddev->lock);
NeilBrowndafb20f2012-03-19 12:46:39 +11006054 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006055 if (rdev->raid_disk >= 0)
Yufen Yue5b521e2019-06-14 15:41:07 -07006056 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006057
NeilBrowna4a3d262015-07-17 11:57:30 +10006058 if (mddev->degraded && !mddev->ro)
6059 /* This ensures that recovering status is reported immediately
6060 * via sysfs - until a lack of spares is confirmed.
6061 */
6062 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006063 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf72ffdd2014-09-30 14:23:59 +10006064
Shaohua Li29530792016-12-08 15:48:19 -08006065 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07006066 md_update_sb(mddev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006067
NeilBrownd7603b72006-01-06 00:20:30 -08006068 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006069 return 0;
Xiao Nib1261942018-01-24 12:17:38 +08006070
Guoqing Jiangd4945492019-06-14 17:10:39 +08006071bitmap_abort:
6072 mddev_detach(mddev);
6073 if (mddev->private)
6074 pers->free(mddev, mddev->private);
6075 mddev->private = NULL;
6076 module_put(pers->owner);
6077 md_bitmap_destroy(mddev);
Xiao Nib1261942018-01-24 12:17:38 +08006078abort:
NeilBrown4bc034d2019-03-29 10:46:16 -07006079 bioset_exit(&mddev->bio_set);
6080 bioset_exit(&mddev->sync_set);
Xiao Nib1261942018-01-24 12:17:38 +08006081 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006082}
NeilBrown390ee602010-06-01 19:37:27 +10006083EXPORT_SYMBOL_GPL(md_run);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006084
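/*
 * do_md_run() is the ioctl-facing wrapper around md_run(): it also loads
 * the bitmap, publishes the array capacity and emits a change uevent.
 * MD_NOT_READY keeps the array from being reported ready before setup
 * completes.
 */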
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006085int do_md_run(struct mddev *mddev)
NeilBrownfe60b012010-03-29 11:10:42 +11006086{
6087 int err;
6088
NeilBrown9d4b45d2019-08-20 10:21:09 +10006089 set_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006090 err = md_run(mddev);
6091 if (err)
6092 goto out;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006093 err = md_bitmap_load(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006094 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006095 md_bitmap_destroy(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006096 goto out;
6097 }
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006098
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006099 if (mddev_is_clustered(mddev))
6100 md_allow_write(mddev);
6101
Song Liud5d885f2017-11-19 22:17:01 -08006102 /* run start up tasks that require md_thread */
6103 md_start(mddev);
6104
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006105 md_wakeup_thread(mddev->thread);
6106 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6107
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006108 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006109 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006110 mddev->changed = 1;
NeilBrownfe60b012010-03-29 11:10:42 +11006111 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006112 sysfs_notify_dirent_safe(mddev->sysfs_state);
6113 sysfs_notify_dirent_safe(mddev->sysfs_action);
Junxiao Bie1a86db2020-07-14 16:10:26 -07006114 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownfe60b012010-03-29 11:10:42 +11006115out:
NeilBrown9d4b45d2019-08-20 10:21:09 +10006116 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006117 return err;
6118}
6119
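/*
 * Run any personality start-up work (journal replay, for instance) with
 * resync held off by MD_RECOVERY_WAIT.
 */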
Song Liud5d885f2017-11-19 22:17:01 -08006120int md_start(struct mddev *mddev)
6121{
6122 int ret = 0;
6123
6124 if (mddev->pers->start) {
6125 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6126 md_wakeup_thread(mddev->thread);
6127 ret = mddev->pers->start(mddev);
6128 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6129 md_wakeup_thread(mddev->sync_thread);
6130 }
6131 return ret;
6132}
6133EXPORT_SYMBOL_GPL(md_start);
6134
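/*
 * Switch a read-only array back to read-write, provided no member device
 * is read-only and any required journal device is present.
 */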
NeilBrownfd01b882011-10-11 16:47:53 +11006135static int restart_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006136{
6137 struct gendisk *disk = mddev->gendisk;
NeilBrown97b20ef2017-04-13 08:53:48 +10006138 struct md_rdev *rdev;
6139 bool has_journal = false;
6140 bool has_readonly = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006141
Andre Noll80fab1d2008-07-11 22:02:21 +10006142 /* Complain if it has no devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006143 if (list_empty(&mddev->disks))
Andre Noll80fab1d2008-07-11 22:02:21 +10006144 return -ENXIO;
6145 if (!mddev->pers)
6146 return -EINVAL;
6147 if (!mddev->ro)
6148 return -EBUSY;
Song Liu339421d2015-10-08 21:54:13 -07006149
NeilBrown97b20ef2017-04-13 08:53:48 +10006150 rcu_read_lock();
6151 rdev_for_each_rcu(rdev, mddev) {
6152 if (test_bit(Journal, &rdev->flags) &&
6153 !test_bit(Faulty, &rdev->flags))
6154 has_journal = true;
6155 if (bdev_read_only(rdev->bdev))
6156 has_readonly = true;
Song Liu339421d2015-10-08 21:54:13 -07006157 }
NeilBrown97b20ef2017-04-13 08:53:48 +10006158 rcu_read_unlock();
6159 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6160 /* Don't restart rw with journal missing/faulty */
6161 return -EINVAL;
6162 if (has_readonly)
6163 return -EROFS;
Song Liu339421d2015-10-08 21:54:13 -07006164
Andre Noll80fab1d2008-07-11 22:02:21 +10006165 mddev->safemode = 0;
6166 mddev->ro = 0;
6167 set_disk_ro(disk, 0);
NeilBrown9d487392016-11-02 14:16:49 +11006168 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
Andre Noll80fab1d2008-07-11 22:02:21 +10006169 /* Kick recovery or resync if necessary */
6170 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6171 md_wakeup_thread(mddev->thread);
6172 md_wakeup_thread(mddev->sync_thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006173 sysfs_notify_dirent_safe(mddev->sysfs_state);
Andre Noll80fab1d2008-07-11 22:02:21 +10006174 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006175}
6176
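/* Reset all mddev fields to their defaults after the array is stopped. */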
NeilBrownfd01b882011-10-11 16:47:53 +11006177static void md_clean(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006178{
6179 mddev->array_sectors = 0;
6180 mddev->external_size = 0;
6181 mddev->dev_sectors = 0;
6182 mddev->raid_disks = 0;
6183 mddev->recovery_cp = 0;
6184 mddev->resync_min = 0;
6185 mddev->resync_max = MaxSector;
6186 mddev->reshape_position = MaxSector;
6187 mddev->external = 0;
6188 mddev->persistent = 0;
6189 mddev->level = LEVEL_NONE;
6190 mddev->clevel[0] = 0;
6191 mddev->flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08006192 mddev->sb_flags = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006193 mddev->ro = 0;
6194 mddev->metadata_type[0] = 0;
6195 mddev->chunk_sectors = 0;
6196 mddev->ctime = mddev->utime = 0;
6197 mddev->layout = 0;
6198 mddev->max_disks = 0;
6199 mddev->events = 0;
NeilBrowna8707c02010-05-18 09:28:43 +10006200 mddev->can_decrease_events = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006201 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10006202 mddev->reshape_backwards = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006203 mddev->new_level = LEVEL_NONE;
6204 mddev->new_layout = 0;
6205 mddev->new_chunk_sectors = 0;
6206 mddev->curr_resync = 0;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11006207 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown6177b472010-03-29 11:37:13 +11006208 mddev->suspend_lo = mddev->suspend_hi = 0;
6209 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6210 mddev->recovery = 0;
6211 mddev->in_sync = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006212 mddev->changed = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006213 mddev->degraded = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006214 mddev->safemode = 0;
NeilBrownbd691922015-06-25 17:01:40 +10006215 mddev->private = NULL;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006216 mddev->cluster_info = NULL;
NeilBrown6177b472010-03-29 11:37:13 +11006217 mddev->bitmap_info.offset = 0;
6218 mddev->bitmap_info.default_offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10006219 mddev->bitmap_info.default_space = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006220 mddev->bitmap_info.chunksize = 0;
6221 mddev->bitmap_info.daemon_sleep = 0;
6222 mddev->bitmap_info.max_write_behind = 0;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006223 mddev->bitmap_info.nodes = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006224}
6225
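/*
 * Quiesce all writers: freeze recovery, reap any sync thread, flush the
 * bitmap and, if needed, write out a clean superblock.
 */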
NeilBrownfd01b882011-10-11 16:47:53 +11006226static void __md_stop_writes(struct mddev *mddev)
NeilBrowna047e122010-03-29 12:07:53 +11006227{
NeilBrown6b6204e2013-05-09 09:48:30 +10006228 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Guoqing Jiang21e09582020-04-04 23:57:07 +02006229 if (work_pending(&mddev->del_work))
6230 flush_workqueue(md_misc_wq);
NeilBrowna047e122010-03-29 12:07:53 +11006231 if (mddev->sync_thread) {
NeilBrowna047e122010-03-29 12:07:53 +11006232 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10006233 md_reap_sync_thread(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006234 }
6235
6236 del_timer_sync(&mddev->safemode_timer);
6237
Shaohua Li034e33f2016-11-21 10:29:19 -08006238 if (mddev->pers && mddev->pers->quiesce) {
6239 mddev->pers->quiesce(mddev, 1);
6240 mddev->pers->quiesce(mddev, 0);
6241 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006242 md_bitmap_flush(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006243
NeilBrownb6d428c2013-04-24 11:42:42 +10006244 if (mddev->ro == 0 &&
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006245 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
Shaohua Li29530792016-12-08 15:48:19 -08006246 mddev->sb_flags)) {
NeilBrowna047e122010-03-29 12:07:53 +11006247 /* mark array as shutdown cleanly */
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006248 if (!mddev_is_clustered(mddev))
6249 mddev->in_sync = 1;
NeilBrowna047e122010-03-29 12:07:53 +11006250 md_update_sb(mddev, 1);
6251 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +01006252 /* disable the policy so rdevs can free their serialization resources */
6253 mddev->serialize_policy = 0;
6254 mddev_destroy_serial_pool(mddev, NULL, true);
NeilBrowna047e122010-03-29 12:07:53 +11006255}
NeilBrowndefad612011-01-14 09:14:33 +11006256
NeilBrownfd01b882011-10-11 16:47:53 +11006257void md_stop_writes(struct mddev *mddev)
NeilBrowndefad612011-01-14 09:14:33 +11006258{
NeilBrown29f097c2013-11-14 17:54:51 +11006259 mddev_lock_nointr(mddev);
NeilBrowndefad612011-01-14 09:14:33 +11006260 __md_stop_writes(mddev);
6261 mddev_unlock(mddev);
6262}
NeilBrown390ee602010-06-01 19:37:27 +10006263EXPORT_SYMBOL_GPL(md_stop_writes);
NeilBrowna047e122010-03-29 12:07:53 +11006264
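/*
 * Detach the personality from the device: wait for behind writes to
 * drain, quiesce the personality and stop the main md thread.
 */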
NeilBrown5aa61f42014-12-15 12:56:57 +11006265static void mddev_detach(struct mddev *mddev)
6266{
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006267 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang6b40bec2020-02-11 11:10:04 +01006268 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
NeilBrown5aa61f42014-12-15 12:56:57 +11006269 mddev->pers->quiesce(mddev, 1);
6270 mddev->pers->quiesce(mddev, 0);
6271 }
6272 md_unregister_thread(&mddev->thread);
6273 if (mddev->queue)
6274 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6275}
6276
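/*
 * Tear down the personality itself; callers must already have stopped
 * writes.  md_stop() below additionally frees the biosets for external
 * users such as dm-raid.
 */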
NeilBrown5eff3c42012-11-19 10:47:48 +11006277static void __md_stop(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006278{
NeilBrown36d091f2014-12-15 12:56:58 +11006279 struct md_personality *pers = mddev->pers;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006280 md_bitmap_destroy(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11006281 mddev_detach(mddev);
NeilBrownee5d0042015-07-22 10:20:07 +10006282 /* Ensure ->event_work is done */
Guoqing Jiang21e09582020-04-04 23:57:07 +02006283 if (mddev->event_work.func)
6284 flush_workqueue(md_misc_wq);
NeilBrown36d091f2014-12-15 12:56:58 +11006285 spin_lock(&mddev->lock);
NeilBrown6177b472010-03-29 11:37:13 +11006286 mddev->pers = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006287 spin_unlock(&mddev->lock);
6288 pers->free(mddev, mddev->private);
NeilBrownbd691922015-06-25 17:01:40 +10006289 mddev->private = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006290 if (pers->sync_request && mddev->to_remove == NULL)
6291 mddev->to_remove = &md_redundancy_group;
6292 module_put(pers->owner);
NeilBrowncca9cf92010-04-01 12:08:16 +11006293 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Jack Wang6aaa58c2018-10-19 16:21:31 +02006294}
6295
6296void md_stop(struct mddev *mddev)
6297{
6298 /* Stop the array and free any attached data structures.
6299 * This is called from dm-raid.
6300 */
6301 __md_stop(mddev);
Kent Overstreetafeee512018-05-20 18:25:52 -04006302 bioset_exit(&mddev->bio_set);
6303 bioset_exit(&mddev->sync_set);
NeilBrown5eff3c42012-11-19 10:47:48 +11006304}
6305
NeilBrown390ee602010-06-01 19:37:27 +10006306EXPORT_SYMBOL_GPL(md_stop);
NeilBrown6177b472010-03-29 11:37:13 +11006307
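/*
 * Switch a running array to read-only.  Fails with -EBUSY while other
 * openers, a running sync thread or a pending external metadata update
 * make the transition unsafe.
 */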
NeilBrowna05b7ea2012-07-19 15:59:18 +10006308static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
NeilBrowna4bd82d2010-03-29 13:23:10 +11006309{
6310 int err = 0;
NeilBrown30b8feb2013-11-14 15:16:17 +11006311 int did_freeze = 0;
6312
6313 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6314 did_freeze = 1;
6315 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6316 md_wakeup_thread(mddev->thread);
6317 }
NeilBrownf851b602014-12-11 10:02:10 +11006318 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006319 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006320 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006321 /* Thread might be blocked waiting for metadata update
6322 * which will now never happen */
6323 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006324
Shaohua Li29530792016-12-08 15:48:19 -08006325 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrown88724bf2015-09-24 14:00:51 +10006326 return -EBUSY;
NeilBrown30b8feb2013-11-14 15:16:17 +11006327 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006328 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6329 &mddev->recovery));
NeilBrown88724bf2015-09-24 14:00:51 +10006330 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08006331 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown30b8feb2013-11-14 15:16:17 +11006332 mddev_lock_nointr(mddev);
6333
NeilBrowna4bd82d2010-03-29 13:23:10 +11006334 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006335 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006336 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006337 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006338 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown30b8feb2013-11-14 15:16:17 +11006339 if (did_freeze) {
6340 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006341 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006342 md_wakeup_thread(mddev->thread);
6343 }
NeilBrowna4bd82d2010-03-29 13:23:10 +11006344 err = -EBUSY;
6345 goto out;
6346 }
6347 if (mddev->pers) {
NeilBrowndefad612011-01-14 09:14:33 +11006348 __md_stop_writes(mddev);
NeilBrowna4bd82d2010-03-29 13:23:10 +11006349
6350 err = -ENXIO;
6351 if (mddev->ro==1)
6352 goto out;
6353 mddev->ro = 1;
6354 set_disk_ro(mddev->gendisk, 1);
6355 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006356 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6357 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006358 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown30b8feb2013-11-14 15:16:17 +11006359 err = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006360 }
6361out:
6362 mutex_unlock(&mddev->open_mutex);
6363 return err;
6364}
6365
NeilBrown9e653b62006-06-26 00:27:58 -07006366/* mode:
6367 * 0 - completely stop and disassemble array
NeilBrown9e653b62006-06-26 00:27:58 -07006368 * 2 - stop but do not disassemble array
6369 */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006370static int do_md_stop(struct mddev *mddev, int mode,
NeilBrowna05b7ea2012-07-19 15:59:18 +10006371 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006372{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006373 struct gendisk *disk = mddev->gendisk;
NeilBrown3cb03002011-10-11 16:45:26 +11006374 struct md_rdev *rdev;
NeilBrown30b8feb2013-11-14 15:16:17 +11006375 int did_freeze = 0;
6376
6377 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6378 did_freeze = 1;
6379 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6380 md_wakeup_thread(mddev->thread);
6381 }
NeilBrownf851b602014-12-11 10:02:10 +11006382 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006383 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006384 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006385 /* Thread might be blocked waiting for metadata update
6386 * which will now never happen */
6387 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006388
NeilBrown30b8feb2013-11-14 15:16:17 +11006389 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006390 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6391 !test_bit(MD_RECOVERY_RUNNING,
6392 &mddev->recovery)));
NeilBrown30b8feb2013-11-14 15:16:17 +11006393 mddev_lock_nointr(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006394
NeilBrownc8c00a62009-08-10 12:50:52 +10006395 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006396 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006397 mddev->sysfs_active ||
6398 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006399 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006400 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown6e17b022010-08-07 21:41:19 +10006401 mutex_unlock(&mddev->open_mutex);
NeilBrown30b8feb2013-11-14 15:16:17 +11006402 if (did_freeze) {
6403 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006404 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006405 md_wakeup_thread(mddev->thread);
6406 }
NeilBrown260fa032013-08-27 16:44:13 +10006407 return -EBUSY;
6408 }
NeilBrown6e17b022010-08-07 21:41:19 +10006409 if (mddev->pers) {
NeilBrowna4bd82d2010-03-29 13:23:10 +11006410 if (mddev->ro)
6411 set_disk_ro(disk, 0);
NeilBrown409c57f2009-03-31 14:39:39 +11006412
NeilBrowndefad612011-01-14 09:14:33 +11006413 __md_stop_writes(mddev);
NeilBrown5eff3c42012-11-19 10:47:48 +11006414 __md_stop(mddev);
NeilBrown6177b472010-03-29 11:37:13 +11006415
NeilBrowna4bd82d2010-03-29 13:23:10 +11006416 /* tell userspace to handle 'inactive' */
NeilBrown00bcb4a2010-06-01 19:37:23 +10006417 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown0d4ca602006-12-10 02:20:44 -08006418
NeilBrowndafb20f2012-03-19 12:46:39 +11006419 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006420 if (rdev->raid_disk >= 0)
6421 sysfs_unlink_rdev(mddev, rdev);
NeilBrownc4647292009-05-07 12:51:06 +10006422
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006423 set_capacity_and_notify(disk, 0);
NeilBrown6e17b022010-08-07 21:41:19 +10006424 mutex_unlock(&mddev->open_mutex);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006425 mddev->changed = 1;
NeilBrown0d4ca602006-12-10 02:20:44 -08006426
NeilBrowna4bd82d2010-03-29 13:23:10 +11006427 if (mddev->ro)
6428 mddev->ro = 0;
NeilBrown6e17b022010-08-07 21:41:19 +10006429 } else
6430 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006431 /*
6432 * Free resources if final stop
6433 */
NeilBrown9e653b62006-06-26 00:27:58 -07006434 if (mode == 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006435 pr_info("md: %s stopped.\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006436
NeilBrownc3d97142009-12-14 12:49:52 +11006437 if (mddev->bitmap_info.file) {
NeilBrown4af1a042014-12-15 12:57:00 +11006438 struct file *f = mddev->bitmap_info.file;
6439 spin_lock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11006440 mddev->bitmap_info.file = NULL;
NeilBrown4af1a042014-12-15 12:57:00 +11006441 spin_unlock(&mddev->lock);
6442 fput(f);
NeilBrown978f9462006-02-02 14:28:05 -08006443 }
NeilBrownc3d97142009-12-14 12:49:52 +11006444 mddev->bitmap_info.offset = 0;
NeilBrown978f9462006-02-02 14:28:05 -08006445
Linus Torvalds1da177e2005-04-16 15:20:36 -07006446 export_array(mddev);
6447
NeilBrown6177b472010-03-29 11:37:13 +11006448 md_clean(mddev);
NeilBrownefeb53c2009-01-09 08:31:10 +11006449 if (mddev->hold_active == UNTIL_STOP)
6450 mddev->hold_active = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006451 }
NeilBrownd7603b72006-01-06 00:20:30 -08006452 md_new_event(mddev);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006453 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown6e17b022010-08-07 21:41:19 +10006454 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006455}
6456
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006457#ifndef MODULE
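/*
 * In-kernel autorun (non-modular builds only): try to start an array
 * from the superblocks already loaded for its members.
 */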
NeilBrownfd01b882011-10-11 16:47:53 +11006458static void autorun_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006459{
NeilBrown3cb03002011-10-11 16:45:26 +11006460 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006461 int err;
6462
NeilBrowna757e642005-04-16 15:26:42 -07006463 if (list_empty(&mddev->disks))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006464 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465
NeilBrown9d487392016-11-02 14:16:49 +11006466 pr_info("md: running: ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006467
NeilBrowndafb20f2012-03-19 12:46:39 +11006468 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006469 char b[BDEVNAME_SIZE];
NeilBrown9d487392016-11-02 14:16:49 +11006470 pr_cont("<%s>", bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006471 }
NeilBrown9d487392016-11-02 14:16:49 +11006472 pr_cont("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006473
NeilBrownd710e132008-10-13 11:55:12 +11006474 err = do_md_run(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006475 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11006476 pr_warn("md: do_md_run() returned %d\n", err);
NeilBrowna05b7ea2012-07-19 15:59:18 +10006477 do_md_stop(mddev, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006478 }
6479}
6480
6481/*
6482 * Let's try to run arrays based on all disks that have arrived
6483 * until now. (those are in pending_raid_disks)
6484 *
6485 * the method: pick the first pending disk, collect all disks with
6486 * the same UUID, remove all from the pending list and put them into
6487 * the 'same_array' list. Then order this list based on superblock
6488 * update time (freshest comes first), kick out 'old' disks and
6489 * compare superblocks. If everything's fine then run it.
6490 *
6491 * If "unit" is allocated, then bump its reference count
6492 */
6493static void autorun_devices(int part)
6494{
NeilBrown3cb03002011-10-11 16:45:26 +11006495 struct md_rdev *rdev0, *rdev, *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11006496 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006497 char b[BDEVNAME_SIZE];
6498
NeilBrown9d487392016-11-02 14:16:49 +11006499 pr_info("md: autorun ...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006500 while (!list_empty(&pending_raid_disks)) {
NeilBrowne8703fe2006-10-03 01:15:59 -07006501 int unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502 dev_t dev;
NeilBrownad01c9e2006-03-27 01:18:07 -08006503 LIST_HEAD(candidates);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006504 rdev0 = list_entry(pending_raid_disks.next,
NeilBrown3cb03002011-10-11 16:45:26 +11006505 struct md_rdev, same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506
NeilBrown9d487392016-11-02 14:16:49 +11006507 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508 INIT_LIST_HEAD(&candidates);
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006509 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510 if (super_90_load(rdev, rdev0, 0) >= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006511 pr_debug("md: adding %s ...\n",
6512 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006513 list_move(&rdev->same_set, &candidates);
6514 }
6515 /*
6516 * now we have a set of devices, with all of them having
6517 * mostly sane superblocks. It's time to allocate the
6518 * mddev.
6519 */
NeilBrowne8703fe2006-10-03 01:15:59 -07006520 if (part) {
6521 dev = MKDEV(mdp_major,
6522 rdev0->preferred_minor << MdpMinorShift);
6523 unit = MINOR(dev) >> MdpMinorShift;
6524 } else {
6525 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6526 unit = MINOR(dev);
6527 }
6528 if (rdev0->preferred_minor != unit) {
NeilBrown9d487392016-11-02 14:16:49 +11006529 pr_warn("md: unit number in %s is bad: %d\n",
6530 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006531 break;
6532 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006533
Christoph Hellwig28144f92020-10-29 15:58:34 +01006534 md_probe(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006535 mddev = mddev_find(dev);
Neil Brown9bbbca32008-06-28 08:31:17 +10006536 if (!mddev || !mddev->gendisk) {
6537 if (mddev)
6538 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006539 break;
6540 }
NeilBrownf72ffdd2014-09-30 14:23:59 +10006541 if (mddev_lock(mddev))
NeilBrown9d487392016-11-02 14:16:49 +11006542 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006543 else if (mddev->raid_disks || mddev->major_version
6544 || !list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11006545 pr_warn("md: %s already running, cannot run %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546 mdname(mddev), bdevname(rdev0->bdev,b));
6547 mddev_unlock(mddev);
6548 } else {
NeilBrown9d487392016-11-02 14:16:49 +11006549 pr_debug("md: created %s\n", mdname(mddev));
NeilBrown1ec4a932008-02-06 01:39:53 -08006550 mddev->persistent = 1;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006551 rdev_for_each_list(rdev, tmp, &candidates) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006552 list_del_init(&rdev->same_set);
6553 if (bind_rdev_to_array(rdev, mddev))
6554 export_rdev(rdev);
6555 }
6556 autorun_array(mddev);
6557 mddev_unlock(mddev);
6558 }
6559 /* on success, candidates will be empty; on error
6560 * it won't be...
6561 */
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006562 rdev_for_each_list(rdev, tmp, &candidates) {
NeilBrown4b809912008-07-21 17:05:25 +10006563 list_del_init(&rdev->same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006564 export_rdev(rdev);
NeilBrown4b809912008-07-21 17:05:25 +10006565 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006566 mddev_put(mddev);
6567 }
NeilBrown9d487392016-11-02 14:16:49 +11006568 pr_info("md: ... autorun DONE.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006569}
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006570#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006571
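/*
 * RAID_VERSION ioctl handler.  A minimal user-space sketch (device name
 * is just an example, error handling omitted):
 *
 *	mdu_version_t ver;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	ioctl(fd, RAID_VERSION, &ver);
 */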
NeilBrownf72ffdd2014-09-30 14:23:59 +10006572static int get_version(void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006573{
6574 mdu_version_t ver;
6575
6576 ver.major = MD_MAJOR_VERSION;
6577 ver.minor = MD_MINOR_VERSION;
6578 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6579
6580 if (copy_to_user(arg, &ver, sizeof(ver)))
6581 return -EFAULT;
6582
6583 return 0;
6584}
6585
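/*
 * GET_ARRAY_INFO ioctl handler: count member states under RCU and copy
 * an mdu_array_info_t snapshot to user space.
 */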
NeilBrownf72ffdd2014-09-30 14:23:59 +10006586static int get_array_info(struct mddev *mddev, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006587{
6588 mdu_array_info_t info;
NeilBrowna9f326e2009-09-23 18:06:41 +10006589 int nr,working,insync,failed,spare;
NeilBrown3cb03002011-10-11 16:45:26 +11006590 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006591
NeilBrown1ca69c42012-10-11 13:37:33 +11006592 nr = working = insync = failed = spare = 0;
6593 rcu_read_lock();
6594 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006595 nr++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006596 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597 failed++;
6598 else {
6599 working++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006600 if (test_bit(In_sync, &rdev->flags))
NeilBrownf72ffdd2014-09-30 14:23:59 +10006601 insync++;
Song Liub347af82016-08-11 17:14:45 -07006602 else if (test_bit(Journal, &rdev->flags))
6603 /* TODO: add journal count to md_u.h */
6604 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006605 else
6606 spare++;
6607 }
6608 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006609 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006610
6611 info.major_version = mddev->major_version;
6612 info.minor_version = mddev->minor_version;
6613 info.patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006614 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006615 info.level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11006616 info.size = mddev->dev_sectors / 2;
6617 if (info.size != mddev->dev_sectors / 2) /* overflow */
NeilBrown284ae7c2006-02-03 03:03:40 -08006618 info.size = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619 info.nr_disks = nr;
6620 info.raid_disks = mddev->raid_disks;
6621 info.md_minor = mddev->md_minor;
6622 info.not_persistent= !mddev->persistent;
6623
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006624 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625 info.state = 0;
6626 if (mddev->in_sync)
6627 info.state = (1<<MD_SB_CLEAN);
NeilBrownc3d97142009-12-14 12:49:52 +11006628 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown9bd35922014-07-02 11:35:06 +10006629 info.state |= (1<<MD_SB_BITMAP_PRESENT);
Goldwyn Rodriguesca8895d2014-11-26 12:22:03 -06006630 if (mddev_is_clustered(mddev))
6631 info.state |= (1<<MD_SB_CLUSTERED);
NeilBrowna9f326e2009-09-23 18:06:41 +10006632 info.active_disks = insync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006633 info.working_disks = working;
6634 info.failed_disks = failed;
6635 info.spare_disks = spare;
6636
6637 info.layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10006638 info.chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006639
6640 if (copy_to_user(arg, &info, sizeof(info)))
6641 return -EFAULT;
6642
6643 return 0;
6644}
6645
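/*
 * GET_BITMAP_FILE ioctl handler: report the path of the external bitmap
 * file, if any, under ->lock.
 */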
NeilBrownf72ffdd2014-09-30 14:23:59 +10006646static int get_bitmap_file(struct mddev *mddev, void __user * arg)
NeilBrown32a76272005-06-21 17:17:14 -07006647{
6648 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
NeilBrownf4ad3d32014-12-15 12:57:00 +11006649 char *ptr;
NeilBrown4af1a042014-12-15 12:57:00 +11006650 int err;
NeilBrown32a76272005-06-21 17:17:14 -07006651
Benjamin Randazzob6878d92015-07-25 16:36:50 +02006652 file = kzalloc(sizeof(*file), GFP_NOIO);
NeilBrown32a76272005-06-21 17:17:14 -07006653 if (!file)
NeilBrown4af1a042014-12-15 12:57:00 +11006654 return -ENOMEM;
NeilBrown32a76272005-06-21 17:17:14 -07006655
NeilBrown32a76272005-06-21 17:17:14 -07006656 err = 0;
NeilBrown4af1a042014-12-15 12:57:00 +11006657 spin_lock(&mddev->lock);
Benjamin Randazzo25eafe12015-07-25 16:36:50 +02006658 /* bitmap enabled */
6659 if (mddev->bitmap_info.file) {
6660 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6661 sizeof(file->pathname));
6662 if (IS_ERR(ptr))
6663 err = PTR_ERR(ptr);
6664 else
6665 memmove(file->pathname, ptr,
6666 sizeof(file->pathname)-(ptr-file->pathname));
6667 }
NeilBrown4af1a042014-12-15 12:57:00 +11006668 spin_unlock(&mddev->lock);
6669
6670 if (err == 0 &&
6671 copy_to_user(arg, file, sizeof(*file)))
NeilBrown32a76272005-06-21 17:17:14 -07006672 err = -EFAULT;
NeilBrown4af1a042014-12-15 12:57:00 +11006673
NeilBrown32a76272005-06-21 17:17:14 -07006674 kfree(file);
6675 return err;
6676}
6677
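/*
 * GET_DISK_INFO ioctl handler: translate one rdev's flags into the
 * MD_DISK_* state bits expected by user space.
 */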
NeilBrownf72ffdd2014-09-30 14:23:59 +10006678static int get_disk_info(struct mddev *mddev, void __user * arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679{
6680 mdu_disk_info_t info;
NeilBrown3cb03002011-10-11 16:45:26 +11006681 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006682
6683 if (copy_from_user(&info, arg, sizeof(info)))
6684 return -EFAULT;
6685
NeilBrown1ca69c42012-10-11 13:37:33 +11006686 rcu_read_lock();
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05006687 rdev = md_find_rdev_nr_rcu(mddev, info.number);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006688 if (rdev) {
6689 info.major = MAJOR(rdev->bdev->bd_dev);
6690 info.minor = MINOR(rdev->bdev->bd_dev);
6691 info.raid_disk = rdev->raid_disk;
6692 info.state = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08006693 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006694 info.state |= (1<<MD_DISK_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08006695 else if (test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006696 info.state |= (1<<MD_DISK_ACTIVE);
6697 info.state |= (1<<MD_DISK_SYNC);
6698 }
Shaohua Li9efdca12015-10-12 16:59:50 -07006699 if (test_bit(Journal, &rdev->flags))
Song Liubac624f2015-08-13 14:31:55 -07006700 info.state |= (1<<MD_DISK_JOURNAL);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006701 if (test_bit(WriteMostly, &rdev->flags))
6702 info.state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11006703 if (test_bit(FailFast, &rdev->flags))
6704 info.state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705 } else {
6706 info.major = info.minor = 0;
6707 info.raid_disk = -1;
6708 info.state = (1<<MD_DISK_REMOVED);
6709 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006710 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006711
6712 if (copy_to_user(arg, &info, sizeof(info)))
6713 return -EFAULT;
6714
6715 return 0;
6716}
6717
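/*
 * ADD_NEW_DISK backend.  Three cases: assembling an array that is not yet
 * running (the superblock is loaded and compared against the first
 * member), hot-adding a spare to a running array (e.g. "mdadm --add"),
 * and the pre-run configuration path, which is only allowed for v0.90
 * superblocks.
 */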
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006718int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006719{
6720 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006721 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006722 dev_t dev = MKDEV(info->major,info->minor);
6723
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006724 if (mddev_is_clustered(mddev) &&
6725 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
NeilBrown9d487392016-11-02 14:16:49 +11006726 pr_warn("%s: Cannot add to clustered mddev.\n",
6727 mdname(mddev));
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006728 return -EINVAL;
6729 }
6730
Linus Torvalds1da177e2005-04-16 15:20:36 -07006731 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6732 return -EOVERFLOW;
6733
6734 if (!mddev->raid_disks) {
6735 int err;
6736 /* expecting a device which has a superblock */
6737 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6738 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006739 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740 PTR_ERR(rdev));
6741 return PTR_ERR(rdev);
6742 }
6743 if (!list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11006744 struct md_rdev *rdev0
6745 = list_entry(mddev->disks.next,
6746 struct md_rdev, same_set);
NeilBrowna9f326e2009-09-23 18:06:41 +10006747 err = super_types[mddev->major_version]
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748 .load_super(rdev, rdev0, mddev->minor_version);
6749 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006750 pr_warn("md: %s has different UUID to %s\n",
NeilBrownf72ffdd2014-09-30 14:23:59 +10006751 bdevname(rdev->bdev,b),
Linus Torvalds1da177e2005-04-16 15:20:36 -07006752 bdevname(rdev0->bdev,b2));
6753 export_rdev(rdev);
6754 return -EINVAL;
6755 }
6756 }
6757 err = bind_rdev_to_array(rdev, mddev);
6758 if (err)
6759 export_rdev(rdev);
6760 return err;
6761 }
6762
6763 /*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006764 * md_add_new_disk can be used once the array is assembled
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765 * to add "hot spares". They must already have a superblock
6766 * written
6767 */
6768 if (mddev->pers) {
6769 int err;
6770 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006771 pr_warn("%s: personality does not support diskops!\n",
6772 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006773 return -EINVAL;
6774 }
NeilBrown7b1e35f2005-09-09 16:23:50 -07006775 if (mddev->persistent)
6776 rdev = md_import_device(dev, mddev->major_version,
6777 mddev->minor_version);
6778 else
6779 rdev = md_import_device(dev, -1, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006780 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006781 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782 PTR_ERR(rdev));
6783 return PTR_ERR(rdev);
6784 }
NeilBrown1a855a02010-12-09 16:36:28 +11006785 /* set saved_raid_disk if appropriate */
NeilBrown41158c72005-06-21 17:17:25 -07006786 if (!mddev->persistent) {
6787 if (info->state & (1<<MD_DISK_SYNC) &&
NeilBrownbf572542011-01-12 09:03:35 +11006788 info->raid_disk < mddev->raid_disks) {
NeilBrown41158c72005-06-21 17:17:25 -07006789 rdev->raid_disk = info->raid_disk;
NeilBrownbf572542011-01-12 09:03:35 +11006790 set_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11006791 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownbf572542011-01-12 09:03:35 +11006792 } else
NeilBrown41158c72005-06-21 17:17:25 -07006793 rdev->raid_disk = -1;
NeilBrownf4667222013-12-09 12:04:56 +11006794 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrown41158c72005-06-21 17:17:25 -07006795 } else
6796 super_types[mddev->major_version].
6797 validate_super(mddev, rdev);
NeilBrownbedd86b2011-05-11 14:26:20 +10006798 if ((info->state & (1<<MD_DISK_SYNC)) &&
NeilBrownf4563092012-07-03 15:59:06 +10006799 rdev->raid_disk != info->raid_disk) {
NeilBrownbedd86b2011-05-11 14:26:20 +10006800 /* This was a hot-add request, but the events count
6801 * doesn't match, so reject it.
6802 */
6803 export_rdev(rdev);
6804 return -EINVAL;
6805 }
6806
NeilBrownb2d444d2005-11-08 21:39:31 -08006807 clear_bit(In_sync, &rdev->flags); /* just to be sure */
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006808 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6809 set_bit(WriteMostly, &rdev->flags);
NeilBrown575a80f2009-03-31 14:33:13 +11006810 else
6811 clear_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006812 if (info->state & (1<<MD_DISK_FAILFAST))
6813 set_bit(FailFast, &rdev->flags);
6814 else
6815 clear_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006816
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006817 if (info->state & (1<<MD_DISK_JOURNAL)) {
6818 struct md_rdev *rdev2;
6819 bool has_journal = false;
6820
6821 /* make sure no existing journal disk */
6822 rdev_for_each(rdev2, mddev) {
6823 if (test_bit(Journal, &rdev2->flags)) {
6824 has_journal = true;
6825 break;
6826 }
6827 }
NeilBrown230b55f2017-10-17 14:24:09 +11006828 if (has_journal || mddev->bitmap) {
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006829 export_rdev(rdev);
6830 return -EBUSY;
6831 }
Song Liubac624f2015-08-13 14:31:55 -07006832 set_bit(Journal, &rdev->flags);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006833 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006834 /*
6835 * check whether the device shows up in other nodes
6836 */
6837 if (mddev_is_clustered(mddev)) {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006838 if (info->state & (1 << MD_DISK_CANDIDATE))
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006839 set_bit(Candidate, &rdev->flags);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006840 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006841 /* --add initiated by this node */
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006842 err = md_cluster_ops->add_new_disk(mddev, rdev);
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006843 if (err) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006844 export_rdev(rdev);
6845 return err;
6846 }
6847 }
6848 }
6849
Linus Torvalds1da177e2005-04-16 15:20:36 -07006850 rdev->raid_disk = -1;
6851 err = bind_rdev_to_array(rdev, mddev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006852
Linus Torvalds1da177e2005-04-16 15:20:36 -07006853 if (err)
6854 export_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006855
6856 if (mddev_is_clustered(mddev)) {
Guoqing Jiange566aef2016-08-12 13:42:34 +08006857 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6858 if (!err) {
6859 err = md_cluster_ops->new_disk_ack(mddev,
6860 err == 0);
6861 if (err)
6862 md_kick_rdev_from_array(rdev);
6863 }
6864 } else {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006865 if (err)
6866 md_cluster_ops->add_new_disk_cancel(mddev);
6867 else
6868 err = add_bound_rdev(rdev);
6869 }
6870
6871 } else if (!err)
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05006872 err = add_bound_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006873
Linus Torvalds1da177e2005-04-16 15:20:36 -07006874 return err;
6875 }
6876
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006877 /* otherwise, md_add_new_disk is only allowed
Linus Torvalds1da177e2005-04-16 15:20:36 -07006878 * for major_version==0 superblocks
6879 */
6880 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006881 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006882 return -EINVAL;
6883 }
6884
6885 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6886 int err;
NeilBrownd710e132008-10-13 11:55:12 +11006887 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006888 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006889 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006890 PTR_ERR(rdev));
6891 return PTR_ERR(rdev);
6892 }
6893 rdev->desc_nr = info->number;
6894 if (info->raid_disk < mddev->raid_disks)
6895 rdev->raid_disk = info->raid_disk;
6896 else
6897 rdev->raid_disk = -1;
6898
Linus Torvalds1da177e2005-04-16 15:20:36 -07006899 if (rdev->raid_disk < mddev->raid_disks)
NeilBrownb2d444d2005-11-08 21:39:31 -08006900 if (info->state & (1<<MD_DISK_SYNC))
6901 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006902
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006903 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6904 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006905 if (info->state & (1<<MD_DISK_FAILFAST))
6906 set_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006907
Linus Torvalds1da177e2005-04-16 15:20:36 -07006908 if (!mddev->persistent) {
NeilBrown9d487392016-11-02 14:16:49 +11006909 pr_debug("md: nonpersistent superblock ...\n");
Mike Snitzer77304d22010-11-08 14:39:12 +01006910 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6911 } else
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006912 rdev->sb_start = calc_dev_sboffset(rdev);
NeilBrown8190e752009-06-18 08:48:58 +10006913 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914
NeilBrown2bf071b2006-01-06 00:20:55 -08006915 err = bind_rdev_to_array(rdev, mddev);
6916 if (err) {
6917 export_rdev(rdev);
6918 return err;
6919 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006920 }
6921
6922 return 0;
6923}
6924
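/*
 * HOT_REMOVE_DISK ioctl handler: detach a failed or spare device; a
 * device still active in the array cannot be removed and yields -EBUSY.
 */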
NeilBrownf72ffdd2014-09-30 14:23:59 +10006925static int hot_remove_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006926{
6927 char b[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006928 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006929
Yufen Yuc42a0e22018-05-04 18:08:10 +08006930 if (!mddev->pers)
6931 return -ENODEV;
6932
Linus Torvalds1da177e2005-04-16 15:20:36 -07006933 rdev = find_rdev(mddev, dev);
6934 if (!rdev)
6935 return -ENXIO;
6936
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006937 if (rdev->raid_disk < 0)
6938 goto kick_rdev;
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006939
NeilBrown3ea8929d2013-04-24 11:42:41 +10006940 clear_bit(Blocked, &rdev->flags);
6941 remove_and_add_spares(mddev, rdev);
6942
Linus Torvalds1da177e2005-04-16 15:20:36 -07006943 if (rdev->raid_disk >= 0)
6944 goto busy;
6945
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006946kick_rdev:
Goldwyn Rodrigues54a88392015-12-21 10:51:00 +11006947 if (mddev_is_clustered(mddev))
Goldwyn Rodrigues88bcfef2015-04-14 10:44:44 -05006948 md_cluster_ops->remove_disk(mddev, rdev);
6949
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05006950 md_kick_rdev_from_array(rdev);
Shaohua Li29530792016-12-08 15:48:19 -08006951 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11006952 if (mddev->thread)
6953 md_wakeup_thread(mddev->thread);
6954 else
6955 md_update_sb(mddev, 1);
NeilBrownd7603b72006-01-06 00:20:30 -08006956 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006957
6958 return 0;
6959busy:
NeilBrown9d487392016-11-02 14:16:49 +11006960 pr_debug("md: cannot remove active disk %s from %s ...\n",
6961 bdevname(rdev->bdev,b), mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006962 return -EBUSY;
6963}
6964
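/*
 * HOT_ADD_DISK ioctl handler: legacy hot-add for v0.90 arrays; unlike
 * ADD_NEW_DISK it does not read a superblock from the device.
 */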
static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	int err;
	struct md_rdev *rdev;

	if (!mddev->pers)
		return -ENODEV;

	if (mddev->major_version != 0) {
		pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
			mdname(mddev));
		return -EINVAL;
	}
	if (!mddev->pers->hot_add_disk) {
		pr_warn("%s: personality does not support diskops!\n",
			mdname(mddev));
		return -EINVAL;
	}

	rdev = md_import_device(dev, -1, 0);
	if (IS_ERR(rdev)) {
		pr_warn("md: error, md_import_device() returned %ld\n",
			PTR_ERR(rdev));
		return -EINVAL;
	}

	if (mddev->persistent)
		rdev->sb_start = calc_dev_sboffset(rdev);
	else
		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;

	rdev->sectors = rdev->sb_start;

	if (test_bit(Faulty, &rdev->flags)) {
		pr_warn("md: can not hot-add faulty %s disk to %s!\n",
			bdevname(rdev->bdev, b), mdname(mddev));
		err = -EINVAL;
		goto abort_export;
	}

	clear_bit(In_sync, &rdev->flags);
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	err = bind_rdev_to_array(rdev, mddev);
	if (err)
		goto abort_export;

	/*
	 * The rest should better be atomic, we can have disk failures
	 * noticed in interrupt contexts ...
	 */

	rdev->raid_disk = -1;

	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	if (!mddev->thread)
		md_update_sb(mddev, 1);
	/*
	 * Kick recovery, maybe this spare has to be added to the
	 * array immediately.
	 */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_new_event(mddev);
	return 0;

abort_export:
	export_rdev(rdev);
	return err;
}

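/*
 * set_bitmap_file() - attach (fd >= 0) or detach (fd < 0) a file-backed
 * write-intent bitmap on an array.  The file must be a regular file,
 * open for write and not otherwise in use; on a running array the
 * personality is quiesced around bitmap creation/destruction via
 * mddev_suspend()/mddev_resume().
 */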
static int set_bitmap_file(struct mddev *mddev, int fd)
{
	int err = 0;

	if (mddev->pers) {
		if (!mddev->pers->quiesce || !mddev->thread)
			return -EBUSY;
		if (mddev->recovery || mddev->sync_thread)
			return -EBUSY;
		/* we should be able to change the bitmap.. */
	}

	if (fd >= 0) {
		struct inode *inode;
		struct file *f;

		if (mddev->bitmap || mddev->bitmap_info.file)
			return -EEXIST; /* cannot add when bitmap is present */
		f = fget(fd);

		if (f == NULL) {
			pr_warn("%s: error: failed to get bitmap file\n",
				mdname(mddev));
			return -EBADF;
		}

		inode = f->f_mapping->host;
		if (!S_ISREG(inode->i_mode)) {
			pr_warn("%s: error: bitmap file must be a regular file\n",
				mdname(mddev));
			err = -EBADF;
		} else if (!(f->f_mode & FMODE_WRITE)) {
			pr_warn("%s: error: bitmap file must be open for write\n",
				mdname(mddev));
			err = -EBADF;
		} else if (atomic_read(&inode->i_writecount) != 1) {
			pr_warn("%s: error: bitmap file is already in use\n",
				mdname(mddev));
			err = -EBUSY;
		}
		if (err) {
			fput(f);
			return err;
		}
		mddev->bitmap_info.file = f;
		mddev->bitmap_info.offset = 0; /* file overrides offset */
	} else if (mddev->bitmap == NULL)
		return -ENOENT; /* cannot remove what isn't there */
	err = 0;
	if (mddev->pers) {
		if (fd >= 0) {
			struct bitmap *bitmap;

			bitmap = md_bitmap_create(mddev, -1);
			mddev_suspend(mddev);
			if (!IS_ERR(bitmap)) {
				mddev->bitmap = bitmap;
				err = md_bitmap_load(mddev);
			} else
				err = PTR_ERR(bitmap);
			if (err) {
				md_bitmap_destroy(mddev);
				fd = -1;
			}
			mddev_resume(mddev);
		} else if (fd < 0) {
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		}
	}
	if (fd < 0) {
		struct file *f = mddev->bitmap_info.file;
		if (f) {
			spin_lock(&mddev->lock);
			mddev->bitmap_info.file = NULL;
			spin_unlock(&mddev->lock);
			fput(f);
		}
	}

	return err;
}

/*
 * md_set_array_info is used in two different ways.
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout and chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 *  In this case raid_disks will be 0, and the major_version field is
 *  used to determine which style super-blocks are to be found on the devices.
 *  The minor and patch _version numbers are also kept in case the
 *  super_block handler wishes to interpret them.
 */
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
{
	if (info->raid_disks == 0) {
		/* just setting version number for superblock loading */
		if (info->major_version < 0 ||
		    info->major_version >= ARRAY_SIZE(super_types) ||
		    super_types[info->major_version].name == NULL) {
			/* maybe try to auto-load a module? */
			pr_warn("md: superblock version %d not known\n",
				info->major_version);
			return -EINVAL;
		}
		mddev->major_version = info->major_version;
		mddev->minor_version = info->minor_version;
		mddev->patch_version = info->patch_version;
		mddev->persistent = !info->not_persistent;
		/* ensure mddev_put doesn't delete this now that there
		 * is some minimal configuration.
		 */
		mddev->ctime = ktime_get_real_seconds();
		return 0;
	}
	mddev->major_version = MD_MAJOR_VERSION;
	mddev->minor_version = MD_MINOR_VERSION;
	mddev->patch_version = MD_PATCHLEVEL_VERSION;
	mddev->ctime = ktime_get_real_seconds();

	mddev->level = info->level;
	mddev->clevel[0] = 0;
	mddev->dev_sectors = 2 * (sector_t)info->size;
	mddev->raid_disks = info->raid_disks;
	/* don't set md_minor, it is determined by which /dev/md* was
	 * opened
	 */
	if (info->state & (1<<MD_SB_CLEAN))
		mddev->recovery_cp = MaxSector;
	else
		mddev->recovery_cp = 0;
	mddev->persistent = !info->not_persistent;
	mddev->external = 0;

	mddev->layout = info->layout;
	if (mddev->level == 0)
		/* Cannot trust RAID0 layout info here */
		mddev->layout = -1;
	mddev->chunk_sectors = info->chunk_size >> 9;

	if (mddev->persistent) {
		mddev->max_disks = MD_SB_DISKS;
		mddev->flags = 0;
		mddev->sb_flags = 0;
	}
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);

	mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
	mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
	mddev->bitmap_info.offset = 0;

	mddev->reshape_position = MaxSector;

	/*
	 * Generate a 128 bit UUID
	 */
	get_random_bytes(mddev->uuid, 16);

	mddev->new_level = mddev->level;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->new_layout = mddev->layout;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;

	return 0;
}

void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
	lockdep_assert_held(&mddev->reconfig_mutex);

	if (mddev->external_size)
		return;

	mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);

static int update_size(struct mddev *mddev, sector_t num_sectors)
{
	struct md_rdev *rdev;
	int rv;
	int fit = (num_sectors == 0);
	sector_t old_dev_sectors = mddev->dev_sectors;

	if (mddev->pers->resize == NULL)
		return -EINVAL;
	/* The "num_sectors" is the number of sectors of each device that
	 * is used.  This can only make sense for arrays with redundancy.
	 * linear and raid0 always use whatever space is available. We can only
	 * consider changing this number if no resync or reconstruction is
	 * happening, and if the new size is acceptable. It must fit before the
	 * sb_start or, if that is <data_offset, it must fit before the size
	 * of each device.  If num_sectors is zero, we find the largest size
	 * that fits.
	 */
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    mddev->sync_thread)
		return -EBUSY;
	if (mddev->ro)
		return -EROFS;

	rdev_for_each(rdev, mddev) {
		sector_t avail = rdev->sectors;

		if (fit && (num_sectors == 0 || num_sectors > avail))
			num_sectors = avail;
		if (avail < num_sectors)
			return -ENOSPC;
	}
	rv = mddev->pers->resize(mddev, num_sectors);
	if (!rv) {
		if (mddev_is_clustered(mddev))
			md_cluster_ops->update_size(mddev, old_dev_sectors);
		else if (mddev->queue) {
			set_capacity_and_notify(mddev->gendisk,
						mddev->array_sectors);
		}
	}
	return rv;
}

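/*
 * update_raid_disks() - change the number of active devices through the
 * personality's check_reshape method.  Refused while a resync/reshape
 * is running, and when a device's data_offset leaves no room to move
 * data in the requested direction.
 */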
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
	int rv;
	struct md_rdev *rdev;
	/* change the number of raid disks */
	if (mddev->pers->check_reshape == NULL)
		return -EINVAL;
	if (mddev->ro)
		return -EROFS;
	if (raid_disks <= 0 ||
	    (mddev->max_disks && raid_disks >= mddev->max_disks))
		return -EINVAL;
	if (mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    mddev->reshape_position != MaxSector)
		return -EBUSY;

	rdev_for_each(rdev, mddev) {
		if (mddev->raid_disks < raid_disks &&
		    rdev->data_offset < rdev->new_data_offset)
			return -EINVAL;
		if (mddev->raid_disks > raid_disks &&
		    rdev->data_offset > rdev->new_data_offset)
			return -EINVAL;
	}

	mddev->delta_disks = raid_disks - mddev->raid_disks;
	if (mddev->delta_disks < 0)
		mddev->reshape_backwards = 1;
	else if (mddev->delta_disks > 0)
		mddev->reshape_backwards = 0;

	rv = mddev->pers->check_reshape(mddev);
	if (rv < 0) {
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
	return rv;
}

/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout
 * and chunk_size fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
	int rv = 0;
	int cnt = 0;
	int state = 0;

	/* calculate expected state, ignoring low bits */
	if (mddev->bitmap && mddev->bitmap_info.offset)
		state |= (1 << MD_SB_BITMAP_PRESENT);

	if (mddev->major_version != info->major_version ||
	    mddev->minor_version != info->minor_version ||
/*	    mddev->patch_version != info->patch_version || */
	    mddev->ctime != info->ctime ||
	    mddev->level != info->level ||
/*	    mddev->layout != info->layout || */
	    mddev->persistent != !info->not_persistent ||
	    mddev->chunk_sectors != info->chunk_size >> 9 ||
	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
	    ((state^info->state) & 0xfffffe00)
		)
		return -EINVAL;
	/* Check there is only one change */
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		cnt++;
	if (mddev->raid_disks != info->raid_disks)
		cnt++;
	if (mddev->layout != info->layout)
		cnt++;
	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
		cnt++;
	if (cnt == 0)
		return 0;
	if (cnt > 1)
		return -EINVAL;

	if (mddev->layout != info->layout) {
		/* Change layout
		 * we don't need to do anything at the md level, the
		 * personality will take care of it all.
		 */
		if (mddev->pers->check_reshape == NULL)
			return -EINVAL;
		else {
			mddev->new_layout = info->layout;
			rv = mddev->pers->check_reshape(mddev);
			if (rv)
				mddev->new_layout = mddev->layout;
			return rv;
		}
	}
	if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
		rv = update_size(mddev, (sector_t)info->size * 2);

	if (mddev->raid_disks != info->raid_disks)
		rv = update_raid_disks(mddev, info->raid_disks);

	if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
		if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
			rv = -EINVAL;
			goto err;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto err;
		}
		if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
			struct bitmap *bitmap;
			/* add the bitmap */
			if (mddev->bitmap) {
				rv = -EEXIST;
				goto err;
			}
			if (mddev->bitmap_info.default_offset == 0) {
				rv = -EINVAL;
				goto err;
			}
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
			bitmap = md_bitmap_create(mddev, -1);
			mddev_suspend(mddev);
			if (!IS_ERR(bitmap)) {
				mddev->bitmap = bitmap;
				rv = md_bitmap_load(mddev);
			} else
				rv = PTR_ERR(bitmap);
			if (rv)
				md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		} else {
			/* remove the bitmap */
			if (!mddev->bitmap) {
				rv = -ENOENT;
				goto err;
			}
			if (mddev->bitmap->storage.file) {
				rv = -EINVAL;
				goto err;
			}
			if (mddev->bitmap_info.nodes) {
				/* hold PW on all the bitmap lock */
				if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
					pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
					rv = -EPERM;
					md_cluster_ops->unlock_all_bitmaps(mddev);
					goto err;
				}

				mddev->bitmap_info.nodes = 0;
				md_cluster_ops->leave(mddev);
				module_put(md_cluster_mod);
				mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
			}
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
			mddev->bitmap_info.offset = 0;
		}
	}
	md_update_sb(mddev, 1);
	return rv;
err:
	return rv;
}

static int set_disk_faulty(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;
	int err = 0;

	if (mddev->pers == NULL)
		return -ENODEV;

	rcu_read_lock();
	rdev = md_find_rdev_rcu(mddev, dev);
	if (!rdev)
		err = -ENODEV;
	else {
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags))
			err = -EBUSY;
	}
	rcu_read_unlock();
	return err;
}

/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
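/*
 * Worked example: with heads = 2 and sectors = 4, each "cylinder"
 * spans 8 sectors (4 KiB), so a 1 TiB array (2147483648 sectors)
 * reports 268435456 cylinders, far more than CHS-era software ever
 * expected, which is why this geometry is only a pretence.
 */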
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mddev *mddev = bdev->bd_disk->private_data;

	geo->heads = 2;
	geo->sectors = 4;
	geo->cylinders = mddev->array_sectors / 8;
	return 0;
}

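/*
 * md_ioctl_valid() - whitelist of the ioctl commands md implements.
 * Anything not listed is rejected with -ENOTTY before any capability
 * check or locking takes place.
 */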
static inline bool md_ioctl_valid(unsigned int cmd)
{
	switch (cmd) {
	case ADD_NEW_DISK:
	case GET_ARRAY_INFO:
	case GET_BITMAP_FILE:
	case GET_DISK_INFO:
	case HOT_ADD_DISK:
	case HOT_REMOVE_DISK:
	case RAID_VERSION:
	case RESTART_ARRAY_RW:
	case RUN_ARRAY:
	case SET_ARRAY_INFO:
	case SET_BITMAP_FILE:
	case SET_DISK_FAULTY:
	case STOP_ARRAY:
	case STOP_ARRAY_RO:
	case CLUSTERED_DISK_NACK:
		return true;
	default:
		return false;
	}
}

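/*
 * md_ioctl() - central ioctl dispatcher for md devices.  The read-only
 * queries (RAID_VERSION, GET_ARRAY_INFO, GET_DISK_INFO, ...) are
 * answered without taking the reconfig mutex; everything else runs
 * under mddev_lock(), and commands that would change the superblock
 * are refused on read-only arrays unless the array can first be
 * switched back to read-write.
 */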
static int md_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;
	struct mddev *mddev = NULL;
	bool did_set_md_closing = false;

	if (!md_ioctl_valid(cmd))
		return -ENOTTY;

	switch (cmd) {
	case RAID_VERSION:
	case GET_ARRAY_INFO:
	case GET_DISK_INFO:
		break;
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	/*
	 * Commands dealing with the RAID driver but not any
	 * particular array:
	 */
	switch (cmd) {
	case RAID_VERSION:
		err = get_version(argp);
		goto out;
	default:;
	}

	/*
	 * Commands creating/starting a new array:
	 */

	mddev = bdev->bd_disk->private_data;

	if (!mddev) {
		BUG();
		goto out;
	}

	/* Some actions do not require the mutex */
	switch (cmd) {
	case GET_ARRAY_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_array_info(mddev, argp);
		goto out;

	case GET_DISK_INFO:
		if (!mddev->raid_disks && !mddev->external)
			err = -ENODEV;
		else
			err = get_disk_info(mddev, argp);
		goto out;

	case SET_DISK_FAULTY:
		err = set_disk_faulty(mddev, new_decode_dev(arg));
		goto out;

	case GET_BITMAP_FILE:
		err = get_bitmap_file(mddev, argp);
		goto out;

	}

	if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
		flush_rdev_wq(mddev);

	if (cmd == HOT_REMOVE_DISK)
		/* need to ensure recovery thread has run */
		wait_event_interruptible_timeout(mddev->sb_wait,
						 !test_bit(MD_RECOVERY_NEEDED,
							   &mddev->recovery),
						 msecs_to_jiffies(5000));
	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
		/* Need to flush page cache, and ensure no-one else opens
		 * and writes
		 */
		mutex_lock(&mddev->open_mutex);
		if (mddev->pers && atomic_read(&mddev->openers) > 1) {
			mutex_unlock(&mddev->open_mutex);
			err = -EBUSY;
			goto out;
		}
		WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
		set_bit(MD_CLOSING, &mddev->flags);
		did_set_md_closing = true;
		mutex_unlock(&mddev->open_mutex);
		sync_blockdev(bdev);
	}
	err = mddev_lock(mddev);
	if (err) {
		pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
			 err, cmd);
		goto out;
	}

	if (cmd == SET_ARRAY_INFO) {
		mdu_array_info_t info;
		if (!arg)
			memset(&info, 0, sizeof(info));
		else if (copy_from_user(&info, argp, sizeof(info))) {
			err = -EFAULT;
			goto unlock;
		}
		if (mddev->pers) {
			err = update_array_info(mddev, &info);
			if (err) {
				pr_warn("md: couldn't update array info. %d\n", err);
				goto unlock;
			}
			goto unlock;
		}
		if (!list_empty(&mddev->disks)) {
			pr_warn("md: array %s already has disks!\n", mdname(mddev));
			err = -EBUSY;
			goto unlock;
		}
		if (mddev->raid_disks) {
			pr_warn("md: array %s already initialised!\n", mdname(mddev));
			err = -EBUSY;
			goto unlock;
		}
		err = md_set_array_info(mddev, &info);
		if (err) {
			pr_warn("md: couldn't set array info. %d\n", err);
			goto unlock;
		}
		goto unlock;
	}

	/*
	 * Commands querying/configuring an existing array:
	 */
	/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
	 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
	if ((!mddev->raid_disks && !mddev->external)
	    && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
	    && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
	    && cmd != GET_BITMAP_FILE) {
		err = -ENODEV;
		goto unlock;
	}

	/*
	 * Commands even a read-only array can execute:
	 */
	switch (cmd) {
	case RESTART_ARRAY_RW:
		err = restart_array(mddev);
		goto unlock;

	case STOP_ARRAY:
		err = do_md_stop(mddev, 0, bdev);
		goto unlock;

	case STOP_ARRAY_RO:
		err = md_set_readonly(mddev, bdev);
		goto unlock;

	case HOT_REMOVE_DISK:
		err = hot_remove_disk(mddev, new_decode_dev(arg));
		goto unlock;

	case ADD_NEW_DISK:
		/* We can support ADD_NEW_DISK on read-only arrays
		 * only if we are re-adding a preexisting device.
		 * So require mddev->pers and MD_DISK_SYNC.
		 */
		if (mddev->pers) {
			mdu_disk_info_t info;
			if (copy_from_user(&info, argp, sizeof(info)))
				err = -EFAULT;
			else if (!(info.state & (1<<MD_DISK_SYNC)))
				/* Need to clear read-only for this */
				break;
			else
				err = md_add_new_disk(mddev, &info);
			goto unlock;
		}
		break;
	}

	/*
	 * The remaining ioctls are changing the state of the
	 * superblock, so we do not allow them on read-only arrays.
	 */
	if (mddev->ro && mddev->pers) {
		if (mddev->ro == 2) {
			mddev->ro = 0;
			sysfs_notify_dirent_safe(mddev->sysfs_state);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			/* mddev_unlock will wake thread */
			/* If a device failed while we were read-only, we
			 * need to make sure the metadata is updated now.
			 */
			if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
				mddev_unlock(mddev);
				wait_event(mddev->sb_wait,
					   !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
					   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
				mddev_lock_nointr(mddev);
			}
		} else {
			err = -EROFS;
			goto unlock;
		}
	}

	switch (cmd) {
	case ADD_NEW_DISK:
	{
		mdu_disk_info_t info;
		if (copy_from_user(&info, argp, sizeof(info)))
			err = -EFAULT;
		else
			err = md_add_new_disk(mddev, &info);
		goto unlock;
	}

	case CLUSTERED_DISK_NACK:
		if (mddev_is_clustered(mddev))
			md_cluster_ops->new_disk_ack(mddev, false);
		else
			err = -EINVAL;
		goto unlock;

	case HOT_ADD_DISK:
		err = hot_add_disk(mddev, new_decode_dev(arg));
		goto unlock;

	case RUN_ARRAY:
		err = do_md_run(mddev);
		goto unlock;

	case SET_BITMAP_FILE:
		err = set_bitmap_file(mddev, (int)arg);
		goto unlock;

	default:
		err = -EINVAL;
		goto unlock;
	}

unlock:
	if (mddev->hold_active == UNTIL_IOCTL &&
	    err != -EINVAL)
		mddev->hold_active = 0;
	mddev_unlock(mddev);
out:
	if (did_set_md_closing)
		clear_bit(MD_CLOSING, &mddev->flags);
	return err;
}
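/*
 * Illustrative userspace usage of the query path above (a sketch, not
 * part of the driver; error handling trimmed).  GET_ARRAY_INFO is one
 * of the three commands that skip the CAP_SYS_ADMIN check in
 * md_ioctl(), which is why unprivileged monitoring tools can use it:
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	mdu_array_info_t info;
 *	if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d raid disks, %d active\n",
 *		       info.level, info.raid_disks, info.active_disks);
 */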
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case HOT_REMOVE_DISK:
	case HOT_ADD_DISK:
	case SET_DISK_FAULTY:
	case SET_BITMAP_FILE:
		/* These take in integer arg, do not convert */
		break;
	default:
		arg = (unsigned long)compat_ptr(arg);
		break;
	}

	return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

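/*
 * md_set_read_only() - ->set_read_only() handler, reached from the
 * block layer's BLKROSET path.  Clearing the read-only flag on an
 * array in ro == 1 moves it to read-auto (ro == 2), so it stays
 * read-only until the first write actually arrives.
 */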
static int md_set_read_only(struct block_device *bdev, bool ro)
{
	struct mddev *mddev = bdev->bd_disk->private_data;
	int err;

	err = mddev_lock(mddev);
	if (err)
		return err;

	if (!mddev->raid_disks && !mddev->external) {
		err = -ENODEV;
		goto out_unlock;
	}

	/*
	 * Transitioning to read-auto need only happen for arrays that call
	 * md_write_start and which are not ready for writes yet.
	 */
	if (!ro && mddev->ro == 1 && mddev->pers) {
		err = restart_array(mddev);
		if (err)
			goto out_unlock;
		mddev->ro = 2;
	}

out_unlock:
	mddev_unlock(mddev);
	return err;
}

static int md_open(struct block_device *bdev, fmode_t mode)
{
	/*
	 * Succeed if we can lock the mddev, which confirms that
	 * it isn't being stopped right now.
	 */
	struct mddev *mddev = mddev_find(bdev->bd_dev);
	int err;

	if (!mddev)
		return -ENODEV;

	if (mddev->gendisk != bdev->bd_disk) {
		/* we are racing with mddev_put which is discarding this
		 * bd_disk.
		 */
		mddev_put(mddev);
		/* Wait until bdev->bd_disk is definitely gone */
		if (work_pending(&mddev->del_work))
			flush_workqueue(md_misc_wq);
		/* Then retry the open from the top */
		return -ERESTARTSYS;
	}
	BUG_ON(mddev != bdev->bd_disk->private_data);

	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
		goto out;

	if (test_bit(MD_CLOSING, &mddev->flags)) {
		mutex_unlock(&mddev->open_mutex);
		err = -ENODEV;
		goto out;
	}

	err = 0;
	atomic_inc(&mddev->openers);
	mutex_unlock(&mddev->open_mutex);

	bdev_check_media_change(bdev);
 out:
	if (err)
		mddev_put(mddev);
	return err;
}

static void md_release(struct gendisk *disk, fmode_t mode)
{
	struct mddev *mddev = disk->private_data;

	BUG_ON(!mddev);
	atomic_dec(&mddev->openers);
	mddev_put(mddev);
}

static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
{
	struct mddev *mddev = disk->private_data;
	unsigned int ret = 0;

	if (mddev->changed)
		ret = DISK_EVENT_MEDIA_CHANGE;
	mddev->changed = 0;
	return ret;
}

const struct block_device_operations md_fops =
{
	.owner		= THIS_MODULE,
	.submit_bio	= md_submit_bio,
	.open		= md_open,
	.release	= md_release,
	.ioctl		= md_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= md_compat_ioctl,
#endif
	.getgeo		= md_getgeo,
	.check_events	= md_check_events,
	.set_read_only	= md_set_read_only,
};

static int md_thread(void *arg)
{
	struct md_thread *thread = arg;

	/*
	 * md_thread is a 'system-thread', its priority should be very
	 * high. We avoid resource deadlocks individually in each
	 * raid personality. (RAID5 does preallocation) We also use RR and
	 * the very same RT priority as kswapd, thus we will never get
	 * into a priority inversion deadlock.
	 *
	 * we definitely have to have equal or higher priority than
	 * bdflush, otherwise bdflush will deadlock if there are too
	 * many dirty RAID5 blocks.
	 */

	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {

		/* We need to wait INTERRUPTIBLE so that
		 * we don't add to the load-average.
		 * That means we need to be sure no signals are
		 * pending
		 */
		if (signal_pending(current))
			flush_signals(current);

		wait_event_interruptible_timeout
			(thread->wqueue,
			 test_bit(THREAD_WAKEUP, &thread->flags)
			 || kthread_should_stop() || kthread_should_park(),
			 thread->timeout);

		clear_bit(THREAD_WAKEUP, &thread->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (!kthread_should_stop())
			thread->run(thread);
	}

	return 0;
}

void md_wakeup_thread(struct md_thread *thread)
{
	if (thread) {
		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	}
}
EXPORT_SYMBOL(md_wakeup_thread);

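/*
 * md_register_thread() creates the per-array service thread that a
 * personality uses as its main work loop.  A rough sketch of how a
 * personality wires this up (modelled on the RAID1 personality;
 * raid1d is the caller's own handler, not defined here):
 *
 *	mddev->thread = md_register_thread(raid1d, mddev, "raid1");
 *	if (!mddev->thread)
 *		goto abort;
 *
 * md_thread() then calls the handler each time md_wakeup_thread()
 * sets THREAD_WAKEUP, or whenever thread->timeout expires.
 */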
struct md_thread *md_register_thread(void (*run)(struct md_thread *),
		struct mddev *mddev, const char *name)
{
	struct md_thread *thread;

	thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
	if (!thread)
		return NULL;

	init_waitqueue_head(&thread->wqueue);

	thread->run = run;
	thread->mddev = mddev;
	thread->timeout = MAX_SCHEDULE_TIMEOUT;
	thread->tsk = kthread_run(md_thread, thread,
				  "%s_%s",
				  mdname(thread->mddev),
				  name);
	if (IS_ERR(thread->tsk)) {
		kfree(thread);
		return NULL;
	}
	return thread;
}
EXPORT_SYMBOL(md_register_thread);

void md_unregister_thread(struct md_thread **threadp)
{
	struct md_thread *thread = *threadp;
	if (!thread)
		return;
	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
	/* Locking ensures that mddev_unlock does not wake_up a
	 * non-existent thread
	 */
	spin_lock(&pers_lock);
	*threadp = NULL;
	spin_unlock(&pers_lock);

	kthread_stop(thread->tsk);
	kfree(thread);
}
EXPORT_SYMBOL(md_unregister_thread);

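/*
 * md_error() - report a failed device to the array's personality.
 * Only atomic operations are used here since, as noted in
 * hot_add_disk() above, failures may be noticed in interrupt context;
 * the actual policy is delegated to pers->error_handler, after which
 * the recovery machinery is nudged awake.
 */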
void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
	if (!rdev || test_bit(Faulty, &rdev->flags))
		return;

	if (!mddev->pers || !mddev->pers->error_handler)
		return;
	mddev->pers->error_handler(mddev, rdev);
	if (mddev->degraded)
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
	md_new_event(mddev);
}
EXPORT_SYMBOL(md_error);

/* seq_file implementation for /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
	int i = 0;
	struct md_rdev *rdev;

	seq_printf(seq, "unused devices: ");

	list_for_each_entry(rdev, &pending_raid_disks, same_set) {
		char b[BDEVNAME_SIZE];
		i++;
		seq_printf(seq, "%s ",
			   bdevname(rdev->bdev, b));
	}
	if (!i)
		seq_printf(seq, "<none>");

	seq_printf(seq, "\n");
}

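/*
 * status_resync() emits the progress line of /proc/mdstat.  A typical
 * line it produces looks like this (illustrative values only):
 *
 *	[=====>...............]  resync = 25.0% (26214400/104857600)
 *	finish=64.0min speed=20480K/sec
 *
 * i.e. a 20-slot progress bar, progress computed in per-mille and
 * printed as a percentage, sectors done/total shown as 1K blocks, an
 * estimated time to finish, and the recent throughput.
 */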
static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
	sector_t max_sectors, resync, res;
	unsigned long dt, db = 0;
	sector_t rt, curr_mark_cnt, resync_mark_cnt;
	int scale, recovery_active;
	unsigned int per_milli;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync;
	if (resync <= 3) {
		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
			/* Still cleaning up */
			resync = max_sectors;
	} else if (resync > max_sectors)
		resync = max_sectors;
	else
		resync -= atomic_read(&mddev->recovery_active);

	if (resync == 0) {
		if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
			struct md_rdev *rdev;

			rdev_for_each(rdev, mddev)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    rdev->recovery_offset != MaxSector &&
				    rdev->recovery_offset) {
					seq_printf(seq, "\trecover=REMOTE");
					return 1;
				}
			if (mddev->reshape_position != MaxSector)
				seq_printf(seq, "\treshape=REMOTE");
			else
				seq_printf(seq, "\tresync=REMOTE");
			return 1;
		}
		if (mddev->recovery_cp < MaxSector) {
			seq_printf(seq, "\tresync=PENDING");
			return 1;
		}
		return 0;
	}
	if (resync < 3) {
		seq_printf(seq, "\tresync=DELAYED");
		return 1;
	}

	WARN_ON(max_sectors == 0);
	/* Pick 'scale' such that (resync>>scale)*1000 will fit
	 * in a sector_t, and (max_sectors>>scale) will fit in a
	 * u32, as those are the requirements for sector_div.
	 * Thus 'scale' must be at least 10
	 */
	scale = 10;
	if (sizeof(sector_t) > sizeof(unsigned long)) {
		while (max_sectors/2 > (1ULL<<(scale+32)))
			scale++;
	}
	res = (resync>>scale)*1000;
	sector_div(res, (u32)((max_sectors>>scale)+1));

	per_milli = res;
	{
		int i, x = per_milli/50, y = 20-x;
		seq_printf(seq, "[");
		for (i = 0; i < x; i++)
			seq_printf(seq, "=");
		seq_printf(seq, ">");
		for (i = 0; i < y; i++)
			seq_printf(seq, ".");
		seq_printf(seq, "] ");
	}
	seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
		   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
		    "reshape" :
		    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) ?
		     "check" :
		     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
		      "resync" : "recovery"))),
		   per_milli/10, per_milli % 10,
		   (unsigned long long) resync/2,
		   (unsigned long long) max_sectors/2);

	/*
	 * dt: time from mark until now
	 * db: blocks written from mark until now
	 * rt: remaining time
	 *
	 * rt is a sector_t, which is always 64bit now. We are keeping
	 * the original algorithm, but it is not really necessary.
	 *
	 * Original algorithm:
	 * So we divide before multiply in case it is 32bit and close
	 * to the limit.
	 * We scale the divisor (db) by 32 to avoid losing precision
	 * near the end of resync when the number of remaining sectors
	 * is close to 'db'.
	 * We then divide rt by 32 after multiplying by db to compensate.
	 * The '+1' avoids division by zero if db is very small.
	 */
	dt = ((jiffies - mddev->resync_mark) / HZ);
	if (!dt)
		dt++;

	curr_mark_cnt = mddev->curr_mark_cnt;
	recovery_active = atomic_read(&mddev->recovery_active);
	resync_mark_cnt = mddev->resync_mark_cnt;

	if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
		db = curr_mark_cnt - (recovery_active + resync_mark_cnt);

	rt = max_sectors - resync;	/* number of remaining sectors */
	rt = div64_u64(rt, db/32+1);
	rt *= dt;
	rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
	return 1;
}

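/*
 * The md_seq_* iterator walks all_mddevs to generate /proc/mdstat.
 * Two sentinel cursors frame the device list: (void *)1 stands for
 * the header (the "Personalities :" banner) and (void *)2 for the
 * trailing "unused devices:" line.  Each real mddev is pinned with
 * mddev_get() while it is being printed and released in the
 * next/stop callbacks.
 */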
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	struct mddev *mddev;

	if (l >= 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void*)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp, &all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, struct mddev, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void*)2; /* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	struct mddev *next_mddev, *mddev = v;

	++*pos;
	if (v == (void*)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void*)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp, struct mddev, all_mddevs));
	else {
		next_mddev = (void*)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void*)1)
		mddev_put(mddev);
	return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;

	if (mddev && v != (void*)1 && v != (void*)2)
		mddev_put(mddev);
}

8215static int md_seq_show(struct seq_file *seq, void *v)
8216{
NeilBrownfd01b882011-10-11 16:47:53 +11008217 struct mddev *mddev = v;
Andre Nolldd8ac332009-03-31 14:33:13 +11008218 sector_t sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11008219 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008220
8221 if (v == (void*)1) {
NeilBrown84fc4b52011-10-11 16:49:58 +11008222 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008223 seq_printf(seq, "Personalities : ");
8224 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008225 list_for_each_entry(pers, &pers_list, list)
8226 seq_printf(seq, "[%s] ", pers->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008227
8228 spin_unlock(&pers_lock);
8229 seq_printf(seq, "\n");
Kay Sieversf1514632011-07-12 20:48:39 +02008230 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008231 return 0;
8232 }
8233 if (v == (void*)2) {
8234 status_unused(seq);
8235 return 0;
8236 }
8237
NeilBrown36d091f2014-12-15 12:56:58 +11008238 spin_lock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008239 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8240 seq_printf(seq, "%s : %sactive", mdname(mddev),
8241 mddev->pers ? "" : "in");
8242 if (mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08008243 if (mddev->ro==1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008244 seq_printf(seq, " (read-only)");
NeilBrownf91de922005-11-08 21:39:36 -08008245 if (mddev->ro==2)
NeilBrown52720ae2008-03-10 11:43:47 -07008246 seq_printf(seq, " (auto-read-only)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008247 seq_printf(seq, " %s", mddev->pers->name);
8248 }
8249
Andre Nolldd8ac332009-03-31 14:33:13 +11008250 sectors = 0;
NeilBrownf97fcad2014-12-15 12:56:59 +11008251 rcu_read_lock();
8252 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008253 char b[BDEVNAME_SIZE];
8254 seq_printf(seq, " %s[%d]",
8255 bdevname(rdev->bdev,b), rdev->desc_nr);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07008256 if (test_bit(WriteMostly, &rdev->flags))
8257 seq_printf(seq, "(W)");
Shaohua Li9efdca12015-10-12 16:59:50 -07008258 if (test_bit(Journal, &rdev->flags))
8259 seq_printf(seq, "(J)");
NeilBrownb2d444d2005-11-08 21:39:31 -08008260 if (test_bit(Faulty, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008261 seq_printf(seq, "(F)");
8262 continue;
NeilBrown2d78f8c2011-12-23 10:17:51 +11008263 }
8264 if (rdev->raid_disk < 0)
NeilBrownb325a322005-09-09 16:24:00 -07008265 seq_printf(seq, "(S)"); /* spare */
NeilBrown2d78f8c2011-12-23 10:17:51 +11008266 if (test_bit(Replacement, &rdev->flags))
8267 seq_printf(seq, "(R)");
Andre Nolldd8ac332009-03-31 14:33:13 +11008268 sectors += rdev->sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008269 }
NeilBrownf97fcad2014-12-15 12:56:59 +11008270 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271
8272 if (!list_empty(&mddev->disks)) {
8273 if (mddev->pers)
8274 seq_printf(seq, "\n %llu blocks",
Andre Nollf233ea52008-07-21 17:05:22 +10008275 (unsigned long long)
8276 mddev->array_sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008277 else
8278 seq_printf(seq, "\n %llu blocks",
Andre Nolldd8ac332009-03-31 14:33:13 +11008279 (unsigned long long)sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008280 }
NeilBrown1cd6bf12005-09-09 16:24:00 -07008281 if (mddev->persistent) {
8282 if (mddev->major_version != 0 ||
8283 mddev->minor_version != 90) {
8284 seq_printf(seq," super %d.%d",
8285 mddev->major_version,
8286 mddev->minor_version);
8287 }
NeilBrowne6910632008-02-06 01:39:51 -08008288 } else if (mddev->external)
8289 seq_printf(seq, " super external:%s",
8290 mddev->metadata_type);
8291 else
NeilBrown1cd6bf12005-09-09 16:24:00 -07008292 seq_printf(seq, " super non-persistent");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008293
8294 if (mddev->pers) {
NeilBrownd710e132008-10-13 11:55:12 +11008295 mddev->pers->status(seq, mddev);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008296 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008297 if (mddev->pers->sync_request) {
NeilBrownf7851be2015-07-02 17:12:58 +10008298 if (status_resync(seq, mddev))
NeilBrown8e1b39d2005-11-08 21:39:41 -08008299 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008300 }
NeilBrown32a76272005-06-21 17:17:14 -07008301 } else
8302 seq_printf(seq, "\n ");
8303
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07008304 md_bitmap_status(seq, mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008305
8306 seq_printf(seq, "\n");
8307 }
NeilBrown36d091f2014-12-15 12:56:58 +11008308 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008309
Linus Torvalds1da177e2005-04-16 15:20:36 -07008310 return 0;
8311}
8312
Jan Engelhardt110518b2009-05-07 12:49:37 +10008313static const struct seq_operations md_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008314 .start = md_seq_start,
8315 .next = md_seq_next,
8316 .stop = md_seq_stop,
8317 .show = md_seq_show,
8318};
8319
8320static int md_seq_open(struct inode *inode, struct file *file)
8321{
Kay Sieversf1514632011-07-12 20:48:39 +02008322 struct seq_file *seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008323 int error;
8324
8325 error = seq_open(file, &md_seq_ops);
NeilBrownd7603b72006-01-06 00:20:30 -08008326 if (error)
Kay Sieversf1514632011-07-12 20:48:39 +02008327 return error;
8328
8329 seq = file->private_data;
8330 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008331 return error;
8332}
8333
NeilBrowne2f23b62014-04-09 14:33:51 +10008334static int md_unloading;
Al Viroafc9a422017-07-03 06:39:46 -04008335static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
NeilBrownd7603b72006-01-06 00:20:30 -08008336{
Kay Sieversf1514632011-07-12 20:48:39 +02008337 struct seq_file *seq = filp->private_data;
Al Viroafc9a422017-07-03 06:39:46 -04008338 __poll_t mask;
NeilBrownd7603b72006-01-06 00:20:30 -08008339
NeilBrowne2f23b62014-04-09 14:33:51 +10008340 if (md_unloading)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008341 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008342 poll_wait(filp, &md_event_waiters, wait);
8343
8344 /* always allow read */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008345 mask = EPOLLIN | EPOLLRDNORM;
NeilBrownd7603b72006-01-06 00:20:30 -08008346
Kay Sieversf1514632011-07-12 20:48:39 +02008347 if (seq->poll_event != atomic_read(&md_event_count))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008348 mask |= EPOLLERR | EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008349 return mask;
8350}
8351
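/*
 * Illustrative user-space usage (a sketch, not part of the driver):
 * a monitor opens /proc/mdstat, reads it to snapshot poll_event, then
 * blocks in poll() for POLLPRI/POLLERR, which mdstat_poll() above
 * raises once md_event_count has moved past that snapshot:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	for (;;) {
 *		char buf[4096];
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));
 *		poll(&pfd, 1, -1);
 *	}
 */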
Alexey Dobriyan97a32532020-02-03 17:37:17 -08008352static const struct proc_ops mdstat_proc_ops = {
8353 .proc_open = md_seq_open,
8354 .proc_read = seq_read,
8355 .proc_lseek = seq_lseek,
8356 .proc_release = seq_release,
8357 .proc_poll = mdstat_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008358};
8359
NeilBrown84fc4b52011-10-11 16:49:58 +11008360int register_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008361{
NeilBrown9d487392016-11-02 14:16:49 +11008362 pr_debug("md: %s personality registered for level %d\n",
8363 p->name, p->level);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008364 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008365 list_add_tail(&p->list, &pers_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008366 spin_unlock(&pers_lock);
8367 return 0;
8368}
NeilBrown6c144d32014-09-30 16:15:38 +10008369EXPORT_SYMBOL(register_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008370
NeilBrown84fc4b52011-10-11 16:49:58 +11008371int unregister_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008372{
NeilBrown9d487392016-11-02 14:16:49 +11008373 pr_debug("md: %s personality unregistered\n", p->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008374 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008375 list_del_init(&p->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008376 spin_unlock(&pers_lock);
8377 return 0;
8378}
NeilBrown6c144d32014-09-30 16:15:38 +10008379EXPORT_SYMBOL(unregister_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008380
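/*
 * Registration sketch (hypothetical personality; the real tables live
 * in the personality modules such as raid1.c, and the method list
 * here is abridged):
 *
 *	static struct md_personality example_personality = {
 *		.name		= "example",
 *		.level		= ...,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		...
 *	};
 *
 *	register_md_personality(&example_personality);
 *
 * with a matching unregister_md_personality() call on module exit.
 */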
NeilBrown6022e752015-08-13 12:32:55 +10008381int register_md_cluster_operations(struct md_cluster_operations *ops,
8382 struct module *module)
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008383{
NeilBrown6022e752015-08-13 12:32:55 +10008384 int ret = 0;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008385 spin_lock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008386 if (md_cluster_ops != NULL)
8387 ret = -EALREADY;
8388 else {
8389 md_cluster_ops = ops;
8390 md_cluster_mod = module;
8391 }
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008392 spin_unlock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008393 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008394}
8395EXPORT_SYMBOL(register_md_cluster_operations);
8396
8397int unregister_md_cluster_operations(void)
8398{
8399 spin_lock(&pers_lock);
8400 md_cluster_ops = NULL;
8401 spin_unlock(&pers_lock);
8402 return 0;
8403}
8404EXPORT_SYMBOL(unregister_md_cluster_operations);
8405
8406int md_setup_cluster(struct mddev *mddev, int nodes)
8407{
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008408 int ret;
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008409 if (!md_cluster_ops)
8410 request_module("md-cluster");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008411 spin_lock(&pers_lock);
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008412 /* ensure module won't be unloaded */
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008413 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
NeilBrown9d487392016-11-02 14:16:49 +11008414 pr_warn("can't find md-cluster module or get its reference.\n");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008415 spin_unlock(&pers_lock);
8416 return -ENOENT;
8417 }
8418 spin_unlock(&pers_lock);
8419
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008420 ret = md_cluster_ops->join(mddev, nodes);
8421 if (!ret)
8422 mddev->safemode_delay = 0;
8423 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008424}
8425
8426void md_cluster_stop(struct mddev *mddev)
8427{
Goldwyn Rodriguesc4ce8672014-03-29 10:20:02 -05008428 if (!md_cluster_ops)
8429 return;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008430 md_cluster_ops->leave(mddev);
8431 module_put(md_cluster_mod);
8432}
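/*
 * Lifecycle sketch for the cluster hooks above (assuming the
 * md-cluster module): md-cluster registers its operation table at
 * module init, md_setup_cluster() pins that module and joins the
 * cluster, and md_cluster_stop() undoes both:
 *
 *	err = md_setup_cluster(mddev, nodes);	(calls ops->join())
 *	if (!err) {
 *		...clustered array runs...
 *		md_cluster_stop(mddev);		(calls ops->leave())
 *	}
 */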
8433
NeilBrownfd01b882011-10-11 16:47:53 +11008434static int is_mddev_idle(struct mddev *mddev, int init)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008435{
NeilBrownf72ffdd2014-09-30 14:23:59 +10008436 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008437 int idle;
NeilBrowneea1bf32009-03-31 14:27:02 +11008438 int curr_events;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008439
8440 idle = 1;
NeilBrown4b809912008-07-21 17:05:25 +10008441 rcu_read_lock();
8442 rdev_for_each_rcu(rdev, mddev) {
Christoph Hellwig4245e522020-09-03 07:40:59 +02008443 struct gendisk *disk = rdev->bdev->bd_disk;
Michael Callahan59767fb2018-07-18 04:47:37 -07008444 curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
NeilBrowneea1bf32009-03-31 14:27:02 +11008445 atomic_read(&disk->sync_io);
NeilBrown713f6ab2007-07-17 04:06:12 -07008446 /* sync IO will cause sync_io to increase before the disk_stats
8447 * as sync_io is counted when a request starts, and
8448 * disk_stats is counted when it completes.
8449 * So resync activity will cause curr_events to be smaller than
8450 * when there was no such activity.
8451 * non-sync IO will cause disk_stats to increase without
8452 * increasing sync_io so curr_events will (eventually)
8453 * be larger than it was before. Once it becomes
8454 * substantially larger, the test below will cause
8455 * the array to appear non-idle, and resync will slow
8456 * down.
8457 * If there is a lot of outstanding resync activity when
8458 * we set last_event to curr_events, then all that activity
8459 * completing might cause the array to appear non-idle
8460 * and resync will be slowed down even though there might
8461 * not have been non-resync activity. This will only
8462 * happen once though. 'last_events' will soon reflect
8463 * the state where there is little or no outstanding
8464 * resync requests, and further resync activity will
8465 * always make curr_events less than last_events.
NeilBrownc0e48522005-11-18 01:11:01 -08008466 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008467 */
NeilBrowneea1bf32009-03-31 14:27:02 +11008468 if (init || curr_events - rdev->last_events > 64) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008469 rdev->last_events = curr_events;
8470 idle = 0;
8471 }
8472 }
NeilBrown4b809912008-07-21 17:05:25 +10008473 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008474 return idle;
8475}
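/*
 * Numeric illustration of the heuristic above (made-up values): if,
 * since the last check, part0 'sectors' grew by 2048 while sync_io
 * grew by 2000, curr_events moves by only 48 and the 64-sector slack
 * keeps the rdev "idle"; 2048 sectors of purely non-resync IO would
 * move curr_events by the full 2048, exceed the slack, and throttle
 * the resync.
 */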
8476
NeilBrownfd01b882011-10-11 16:47:53 +11008477void md_done_sync(struct mddev *mddev, int blocks, int ok)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008478{
8479 /* another "blocks" (512-byte) blocks have been synced */
8480 atomic_sub(blocks, &mddev->recovery_active);
8481 wake_up(&mddev->recovery_wait);
8482 if (!ok) {
NeilBrowndfc70642008-05-23 13:04:39 -07008483 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
majianpeng0a19caa2012-11-19 19:57:34 +08008484 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008485 md_wakeup_thread(mddev->thread);
8486 // stop recovery, signal do_sync ....
8487 }
8488}
NeilBrown6c144d32014-09-30 16:15:38 +10008489EXPORT_SYMBOL(md_done_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008490
NeilBrown06d91a52005-06-21 17:17:12 -07008491/* md_write_start(mddev, bi)
8492 * If we need to update some array metadata (e.g. 'active' flag
NeilBrown3d310eb2005-06-21 17:17:26 -07008493 * in superblock) before writing, schedule a superblock update
8494 * and wait for it to complete.
NeilBrowncc27b0c2017-06-05 16:49:39 +10008495 * A return value of 'false' means that the write wasn't recorded
8496 * and cannot proceed as the array is being suspended.
NeilBrown06d91a52005-06-21 17:17:12 -07008497 */
NeilBrowncc27b0c2017-06-05 16:49:39 +10008498bool md_write_start(struct mddev *mddev, struct bio *bi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008499{
Neil Brown0fd62b82008-06-28 08:31:36 +10008500 int did_change = 0;
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008501
NeilBrown06d91a52005-06-21 17:17:12 -07008502 if (bio_data_dir(bi) != WRITE)
NeilBrowncc27b0c2017-06-05 16:49:39 +10008503 return true;
NeilBrown06d91a52005-06-21 17:17:12 -07008504
NeilBrownf91de922005-11-08 21:39:36 -08008505 BUG_ON(mddev->ro == 1);
8506 if (mddev->ro == 2) {
8507 /* need to switch to read/write */
8508 mddev->ro = 0;
8509 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8510 md_wakeup_thread(mddev->thread);
NeilBrown25156192008-03-04 14:29:32 -08008511 md_wakeup_thread(mddev->sync_thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008512 did_change = 1;
NeilBrownf91de922005-11-08 21:39:36 -08008513 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008514 rcu_read_lock();
8515 percpu_ref_get(&mddev->writes_pending);
NeilBrown55cc39f2017-03-15 14:05:14 +11008516 smp_mb(); /* Match smp_mb in set_in_sync() */
NeilBrown31a59e32008-04-30 00:52:30 -07008517 if (mddev->safemode == 1)
8518 mddev->safemode = 0;
NeilBrown4ad23a972017-03-15 14:05:14 +11008519 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
NeilBrown81fe48e2017-08-08 16:56:36 +10008520 if (mddev->in_sync || mddev->sync_checkers) {
NeilBrown85572d72014-12-15 12:56:56 +11008521 spin_lock(&mddev->lock);
NeilBrown3d310eb2005-06-21 17:17:26 -07008522 if (mddev->in_sync) {
8523 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008524 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8525 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown3d310eb2005-06-21 17:17:26 -07008526 md_wakeup_thread(mddev->thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008527 did_change = 1;
NeilBrown3d310eb2005-06-21 17:17:26 -07008528 }
NeilBrown85572d72014-12-15 12:56:56 +11008529 spin_unlock(&mddev->lock);
NeilBrown06d91a52005-06-21 17:17:12 -07008530 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008531 rcu_read_unlock();
Neil Brown0fd62b82008-06-28 08:31:36 +10008532 if (did_change)
NeilBrown00bcb4a2010-06-01 19:37:23 +10008533 sysfs_notify_dirent_safe(mddev->sysfs_state);
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008534 if (!mddev->has_superblocks)
8535 return true;
NeilBrown09a44cc2008-05-23 13:04:36 -07008536 wait_event(mddev->sb_wait,
NeilBrownd47c8ad2017-10-05 16:23:16 +11008537 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8538 mddev->suspended);
NeilBrowncc27b0c2017-06-05 16:49:39 +10008539 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8540 percpu_ref_put(&mddev->writes_pending);
8541 return false;
8542 }
8543 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008544}
NeilBrown6c144d32014-09-30 16:15:38 +10008545EXPORT_SYMBOL(md_write_start);
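/*
 * Caller pattern (a sketch; see the personalities for the real code):
 * a make_request method brackets each write, bailing out if the array
 * is being suspended, and takes md_write_inc() for any extra split
 * parts:
 *
 *	if (!md_write_start(mddev, bio))
 *		return false;
 *	...submit the write, calling md_write_end() when it completes...
 */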
Linus Torvalds1da177e2005-04-16 15:20:36 -07008546
NeilBrown49728052017-03-15 14:05:12 +11008547/* md_write_inc can only be called when md_write_start() has
8548 * already been called at least once for the current request.
8549 * It increments the counter and is useful when a single request
8550 * is split into several parts. Each part causes an increment and
8551 * so needs a matching md_write_end().
8552 * Unlike md_write_start(), it is safe to call md_write_inc() inside
8553 * a spinlocked region.
8554 */
8555void md_write_inc(struct mddev *mddev, struct bio *bi)
8556{
8557 if (bio_data_dir(bi) != WRITE)
8558 return;
8559 WARN_ON_ONCE(mddev->in_sync || mddev->ro);
NeilBrown4ad23a972017-03-15 14:05:14 +11008560 percpu_ref_get(&mddev->writes_pending);
NeilBrown49728052017-03-15 14:05:12 +11008561}
8562EXPORT_SYMBOL(md_write_inc);
8563
NeilBrownfd01b882011-10-11 16:47:53 +11008564void md_write_end(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008565{
NeilBrown4ad23a972017-03-15 14:05:14 +11008566 percpu_ref_put(&mddev->writes_pending);
8567
8568 if (mddev->safemode == 2)
8569 md_wakeup_thread(mddev->thread);
8570 else if (mddev->safemode_delay)
8571 /* The roundup() ensures this only performs locking once
8572 * every ->safemode_delay jiffies
8573 */
8574 mod_timer(&mddev->safemode_timer,
8575 roundup(jiffies, mddev->safemode_delay) +
8576 mddev->safemode_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008577}
NeilBrown4ad23a972017-03-15 14:05:14 +11008578
NeilBrown6c144d32014-09-30 16:15:38 +10008579EXPORT_SYMBOL(md_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008580
Xiao Ni26280892020-08-25 13:42:59 +08008581/* This is used by raid0 and raid10 */
8582void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
8583 struct bio *bio, sector_t start, sector_t size)
8584{
8585 struct bio *discard_bio = NULL;
8586
8587 if (__blkdev_issue_discard(rdev->bdev, start, size,
8588 GFP_NOIO, 0, &discard_bio) || !discard_bio)
8589 return;
8590
8591 bio_chain(discard_bio, bio);
8592 bio_clone_blkg_association(discard_bio, bio);
8593 if (mddev->gendisk)
8594 trace_block_bio_remap(bdev_get_queue(rdev->bdev),
8595 discard_bio, disk_devt(mddev->gendisk),
8596 bio->bi_iter.bi_sector);
8597 submit_bio_noacct(discard_bio);
8598}
8599EXPORT_SYMBOL(md_submit_discard_bio);
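/*
 * Usage sketch (modelled on the raid0 discard path, abridged): the
 * caller splits a large discard into per-device ranges, chains each
 * piece to the parent bio, then ends the parent:
 *
 *	rdev_for_each(rdev, mddev) {
 *		...compute this device's dev_start/dev_len...
 *		if (dev_len)
 *			md_submit_discard_bio(mddev, rdev, bio,
 *					      dev_start, dev_len);
 *	}
 *	bio_endio(bio);
 */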
8600
NeilBrown2a2275d2007-01-26 00:57:11 -08008601/* md_allow_write(mddev)
8602 * Calling this ensures that the array is marked 'active' so that writes
8603 * may proceed without blocking. It is important to call this before
8604 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8605 * Must be called with mddev_lock held.
8606 */
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008607void md_allow_write(struct mddev *mddev)
NeilBrown2a2275d2007-01-26 00:57:11 -08008608{
8609 if (!mddev->pers)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008610 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008611 if (mddev->ro)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008612 return;
Neil Brown1a0fd492008-06-28 08:31:27 +10008613 if (!mddev->pers->sync_request)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008614 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008615
NeilBrown85572d72014-12-15 12:56:56 +11008616 spin_lock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008617 if (mddev->in_sync) {
8618 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008619 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8620 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown2a2275d2007-01-26 00:57:11 -08008621 if (mddev->safemode_delay &&
8622 mddev->safemode == 0)
8623 mddev->safemode = 1;
NeilBrown85572d72014-12-15 12:56:56 +11008624 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008625 md_update_sb(mddev, 0);
NeilBrown00bcb4a2010-06-01 19:37:23 +10008626 sysfs_notify_dirent_safe(mddev->sysfs_state);
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008627 /* wait for the dirty state to be recorded in the metadata */
8628 wait_event(mddev->sb_wait,
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008629 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown2a2275d2007-01-26 00:57:11 -08008630 } else
NeilBrown85572d72014-12-15 12:56:56 +11008631 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008632}
8633EXPORT_SYMBOL_GPL(md_allow_write);
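/*
 * Typical use (a sketch): a personality that needs a GFP_KERNEL
 * allocation while holding the mddev lock calls this first, so any
 * writeback the allocation triggers is not blocked behind a pending
 * metadata update:
 *
 *	md_allow_write(mddev);
 *	new_conf = kzalloc(size, GFP_KERNEL);
 */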
8634
Linus Torvalds1da177e2005-04-16 15:20:36 -07008635#define SYNC_MARKS 10
8636#define SYNC_MARK_STEP (3*HZ)
majianpeng54f89342012-10-31 11:59:10 +11008637#define UPDATE_FREQUENCY (5*60*HZ)
Shaohua Li4ed87312012-10-11 13:34:00 +11008638void md_do_sync(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008639{
Shaohua Li4ed87312012-10-11 13:34:00 +11008640 struct mddev *mddev = thread->mddev;
NeilBrownfd01b882011-10-11 16:47:53 +11008641 struct mddev *mddev2;
Yufen Yue5b521e2019-06-14 15:41:07 -07008642 unsigned int currspeed = 0, window;
Xiao Niac7e50a2014-08-07 09:37:41 -04008643 sector_t max_sectors,j, io_sectors, recovery_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008644 unsigned long mark[SYNC_MARKS];
majianpeng54f89342012-10-31 11:59:10 +11008645 unsigned long update_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008646 sector_t mark_cnt[SYNC_MARKS];
8647 int last_mark,m;
8648 struct list_head *tmp;
8649 sector_t last_check;
NeilBrown57afd892005-06-21 17:17:13 -07008650 int skipped = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008651 struct md_rdev *rdev;
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008652 char *desc, *action = NULL;
majianpeng7c2c57c2012-07-03 12:12:26 +10008653 struct blk_plug plug;
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008654 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008655
8656 /* just in case the thread restarts... */
Song Liud5d885f2017-11-19 22:17:01 -08008657 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8658 test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008659 return;
NeilBrown3991b312014-05-28 13:39:23 +10008660 if (mddev->ro) {/* never try to sync a read-only array */
8661 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008662 return;
NeilBrown3991b312014-05-28 13:39:23 +10008663 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008664
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008665 if (mddev_is_clustered(mddev)) {
8666 ret = md_cluster_ops->resync_start(mddev);
8667 if (ret)
8668 goto skip;
8669
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04008670 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008671 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8672 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8673 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8674 && ((unsigned long long)mddev->curr_resync_completed
8675 < (unsigned long long)mddev->resync_max_sectors))
8676 goto skip;
8677 }
8678
NeilBrown61df9d92006-10-03 01:15:57 -07008679 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008680 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008681 desc = "data-check";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008682 action = "check";
8683 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008684 desc = "requested-resync";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008685 action = "repair";
8686 } else
NeilBrown61df9d92006-10-03 01:15:57 -07008687 desc = "resync";
8688 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8689 desc = "reshape";
8690 else
8691 desc = "recovery";
8692
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008693 mddev->last_sync_action = action ?: desc;
8694
Linus Torvalds1da177e2005-04-16 15:20:36 -07008695 /* we overload curr_resync somewhat here.
8696 * 0 == not engaged in resync at all
8697 * 2 == checking that there is no conflict with another sync
8698 * 1 == like 2, but have yielded to allow conflicting resync to
Yufen Yue5b521e2019-06-14 15:41:07 -07008699 * commence
Linus Torvalds1da177e2005-04-16 15:20:36 -07008700 * other == active in resync - this many blocks
8701 *
8702 * Before starting a resync we must have set curr_resync to
8703 * 2, and then checked that every "conflicting" array has curr_resync
8704 * less than ours. When we find one that is the same or higher
8705 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
8706 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
8707 * This will mean we have to start checking from the beginning again.
8708 *
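 * For example: if arrays A and B share a physical device and both
 * reach curr_resync == 2, the one at the lower mddev address (say A)
 * drops to 1 and yields; B proceeds with its resync while A sleeps
 * on resync_wait and restarts its conflict scan from the top once
 * B's resync has moved on.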
8709 */
8710
8711 do {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008712 int mddev2_minor = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008713 mddev->curr_resync = 2;
8714
8715 try_again:
NeilBrown404e4b42009-12-30 15:25:23 +11008716 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008717 goto skip;
NeilBrown29ac4aa2008-02-06 01:39:58 -08008718 for_each_mddev(mddev2, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008719 if (mddev2 == mddev)
8720 continue;
Bernd Schubert90b08712008-05-23 13:04:38 -07008721 if (!mddev->parallel_resync
8722 && mddev2->curr_resync
8723 && match_mddev_units(mddev, mddev2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008724 DEFINE_WAIT(wq);
8725 if (mddev < mddev2 && mddev->curr_resync == 2) {
8726 /* arbitrarily yield */
8727 mddev->curr_resync = 1;
8728 wake_up(&resync_wait);
8729 }
8730 if (mddev > mddev2 && mddev->curr_resync == 1)
8731 /* no need to wait here, we can wait the next
8732 * time 'round when curr_resync == 2
8733 */
8734 continue;
NeilBrown97441972008-09-19 11:49:54 +10008735 /* We need to wait 'interruptible' so as not to
8736 * contribute to the load average, and not to
8737 * be caught by 'softlockup'
8738 */
8739 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
NeilBrownc91abf52013-11-19 12:02:01 +11008740 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown8712e552005-10-26 01:58:58 -07008741 mddev2->curr_resync >= mddev->curr_resync) {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008742 if (mddev2_minor != mddev2->md_minor) {
8743 mddev2_minor = mddev2->md_minor;
NeilBrown9d487392016-11-02 14:16:49 +11008744 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8745 desc, mdname(mddev),
8746 mdname(mddev2));
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008747 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008748 mddev_put(mddev2);
NeilBrown97441972008-09-19 11:49:54 +10008749 if (signal_pending(current))
8750 flush_signals(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008751 schedule();
8752 finish_wait(&resync_wait, &wq);
8753 goto try_again;
8754 }
8755 finish_wait(&resync_wait, &wq);
8756 }
8757 }
8758 } while (mddev->curr_resync < 2);
8759
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008760 j = 0;
NeilBrown9d888832005-11-08 21:39:26 -08008761 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008762 /* resync follows the size requested by the personality,
NeilBrown57afd892005-06-21 17:17:13 -07008763 * which defaults to physical size, but can be virtual size
Linus Torvalds1da177e2005-04-16 15:20:36 -07008764 */
8765 max_sectors = mddev->resync_max_sectors;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11008766 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008767 /* we don't use the checkpoint if there's a bitmap */
Neil Brown5e96ee62008-06-28 08:31:24 +10008768 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8769 j = mddev->resync_min;
8770 else if (!mddev->bitmap)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008771 j = mddev->recovery_cp;
Neil Brown5e96ee62008-06-28 08:31:24 +10008772
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008773 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
NeilBrownc804cde2012-05-21 09:28:33 +10008774 max_sectors = mddev->resync_max_sectors;
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008775 /*
8776 * If the original node aborts reshaping then we continue the
8777 * reshaping, so set j again to avoid restarting the reshape
8778 * from the very beginning
8779 */
8780 if (mddev_is_clustered(mddev) &&
8781 mddev->reshape_position != MaxSector)
8782 j = mddev->reshape_position;
8783 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008784 /* recovery follows the physical size of devices */
Andre Noll58c0fed2009-03-31 14:33:13 +11008785 max_sectors = mddev->dev_sectors;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008786 j = MaxSector;
Dan Williams4e59ca72009-12-12 21:17:06 -07008787 rcu_read_lock();
NeilBrowndafb20f2012-03-19 12:46:39 +11008788 rdev_for_each_rcu(rdev, mddev)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008789 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07008790 !test_bit(Journal, &rdev->flags) &&
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008791 !test_bit(Faulty, &rdev->flags) &&
8792 !test_bit(In_sync, &rdev->flags) &&
8793 rdev->recovery_offset < j)
8794 j = rdev->recovery_offset;
Dan Williams4e59ca72009-12-12 21:17:06 -07008795 rcu_read_unlock();
NeilBrown133d4522014-07-02 12:04:14 +10008796
8797 /* If there is a bitmap, we need to make sure all
8798 * writes that started before we added a spare
8799 * complete before we start doing a recovery.
8800 * Otherwise the write might complete and (via
8801 * bitmap_endwrite) set a bit in the bitmap after the
8802 * recovery has checked that bit and skipped that
8803 * region.
8804 */
8805 if (mddev->bitmap) {
8806 mddev->pers->quiesce(mddev, 1);
8807 mddev->pers->quiesce(mddev, 0);
8808 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008810
NeilBrown9d487392016-11-02 14:16:49 +11008811 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8812 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
8813 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8814 speed_max(mddev), desc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008815
NeilBrowneea1bf32009-03-31 14:27:02 +11008816 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008817
NeilBrown57afd892005-06-21 17:17:13 -07008818 io_sectors = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008819 for (m = 0; m < SYNC_MARKS; m++) {
8820 mark[m] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008821 mark_cnt[m] = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008822 }
8823 last_mark = 0;
8824 mddev->resync_mark = mark[last_mark];
8825 mddev->resync_mark_cnt = mark_cnt[last_mark];
8826
8827 /*
8828 * Tune reconstruction:
8829 */
Yufen Yue5b521e2019-06-14 15:41:07 -07008830 window = 32 * (PAGE_SIZE / 512);
NeilBrown9d487392016-11-02 14:16:49 +11008831 pr_debug("md: using %dk window, over a total of %lluk.\n",
8832 window/2, (unsigned long long)max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008833
8834 atomic_set(&mddev->recovery_active, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008835 last_check = 0;
8836
8837 if (j>2) {
NeilBrown9d487392016-11-02 14:16:49 +11008838 pr_debug("md: resuming %s of %s from checkpoint.\n",
8839 desc, mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008840 mddev->curr_resync = j;
NeilBrown72f36d52012-10-11 14:25:57 +11008841 } else
8842 mddev->curr_resync = 3; /* no longer delayed */
NeilBrown75d3da42011-01-14 09:14:34 +11008843 mddev->curr_resync_completed = j;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008844 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown72f36d52012-10-11 14:25:57 +11008845 md_new_event(mddev);
majianpeng54f89342012-10-31 11:59:10 +11008846 update_time = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008847
majianpeng7c2c57c2012-07-03 12:12:26 +10008848 blk_start_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008849 while (j < max_sectors) {
NeilBrown57afd892005-06-21 17:17:13 -07008850 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008851
NeilBrown57afd892005-06-21 17:17:13 -07008852 skipped = 0;
NeilBrown97e4f422009-03-31 14:33:13 +11008853
NeilBrown7a91ee12009-05-26 12:57:21 +10008854 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8855 ((mddev->curr_resync > mddev->curr_resync_completed &&
8856 (mddev->curr_resync - mddev->curr_resync_completed)
8857 > (max_sectors >> 4)) ||
majianpeng54f89342012-10-31 11:59:10 +11008858 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
NeilBrown7a91ee12009-05-26 12:57:21 +10008859 (j - mddev->curr_resync_completed)*2
NeilBrownc5e19d92015-07-17 12:06:02 +10008860 >= mddev->resync_max - mddev->curr_resync_completed ||
8861 mddev->curr_resync_completed > mddev->resync_max
NeilBrown7a91ee12009-05-26 12:57:21 +10008862 )) {
NeilBrown97e4f422009-03-31 14:33:13 +11008863 /* time to update curr_resync_completed */
NeilBrown97e4f422009-03-31 14:33:13 +11008864 wait_event(mddev->recovery_wait,
8865 atomic_read(&mddev->recovery_active) == 0);
NeilBrown75d3da42011-01-14 09:14:34 +11008866 mddev->curr_resync_completed = j;
kernelmail35d78c62012-10-31 11:59:10 +11008867 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8868 j > mddev->recovery_cp)
8869 mddev->recovery_cp = j;
majianpeng54f89342012-10-31 11:59:10 +11008870 update_time = jiffies;
Shaohua Li29530792016-12-08 15:48:19 -08008871 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
Junxiao Bie1a86db2020-07-14 16:10:26 -07008872 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown97e4f422009-03-31 14:33:13 +11008873 }
NeilBrownacb180b2009-04-14 16:28:34 +10008874
NeilBrownc91abf52013-11-19 12:02:01 +11008875 while (j >= mddev->resync_max &&
8876 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
NeilBrowne62e58a2009-07-01 13:15:35 +10008877 /* As this condition is controlled by user-space,
8878 * we can block indefinitely, so use '_interruptible'
8879 * to avoid triggering warnings.
8880 */
8881 flush_signals(current); /* just in case */
8882 wait_event_interruptible(mddev->recovery_wait,
8883 mddev->resync_max > j
NeilBrownc91abf52013-11-19 12:02:01 +11008884 || test_bit(MD_RECOVERY_INTR,
8885 &mddev->recovery));
NeilBrowne62e58a2009-07-01 13:15:35 +10008886 }
NeilBrownacb180b2009-04-14 16:28:34 +10008887
NeilBrownc91abf52013-11-19 12:02:01 +11008888 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8889 break;
NeilBrownacb180b2009-04-14 16:28:34 +10008890
NeilBrown09314792015-02-19 16:04:40 +11008891 sectors = mddev->pers->sync_request(mddev, j, &skipped);
NeilBrown57afd892005-06-21 17:17:13 -07008892 if (sectors == 0) {
NeilBrowndfc70642008-05-23 13:04:39 -07008893 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownc91abf52013-11-19 12:02:01 +11008894 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008895 }
NeilBrown57afd892005-06-21 17:17:13 -07008896
8897 if (!skipped) { /* actual IO requested */
8898 io_sectors += sectors;
8899 atomic_add(sectors, &mddev->recovery_active);
8900 }
8901
NeilBrowne875ece2011-07-28 11:39:24 +10008902 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8903 break;
8904
Linus Torvalds1da177e2005-04-16 15:20:36 -07008905 j += sectors;
NeilBrown5ed1df22015-07-24 13:27:08 +10008906 if (j > max_sectors)
8907 /* when skipping, extra large numbers can be returned. */
8908 j = max_sectors;
NeilBrown72f36d52012-10-11 14:25:57 +11008909 if (j > 2)
8910 mddev->curr_resync = j;
NeilBrownff4e8d92006-07-10 04:44:16 -07008911 mddev->curr_mark_cnt = io_sectors;
NeilBrownd7603b72006-01-06 00:20:30 -08008912 if (last_check == 0)
NeilBrowne875ece2011-07-28 11:39:24 +10008913 /* this is the earliest that rebuild will be
NeilBrownd7603b72006-01-06 00:20:30 -08008914 * visible in /proc/mdstat
8915 */
8916 md_new_event(mddev);
NeilBrown57afd892005-06-21 17:17:13 -07008917
8918 if (last_check + window > io_sectors || j == max_sectors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008919 continue;
8920
NeilBrown57afd892005-06-21 17:17:13 -07008921 last_check = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008922 repeat:
8923 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8924 /* step marks */
8925 int next = (last_mark+1) % SYNC_MARKS;
8926
8927 mddev->resync_mark = mark[next];
8928 mddev->resync_mark_cnt = mark_cnt[next];
8929 mark[next] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008930 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008931 last_mark = next;
8932 }
8933
NeilBrownc91abf52013-11-19 12:02:01 +11008934 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8935 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008936
8937 /*
8938 * this loop exits only if either we are slower than
8939 * the 'hard' speed limit, or the system was IO-idle for
8940 * a jiffy.
8941 * the system might be non-idle CPU-wise, but we only care
8942 * about not overloading the IO subsystem. (things like an
8943 * e2fsck being done on the RAID array should execute fast)
8944 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008945 cond_resched();
8946
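		/*
		 * With SYNC_MARKS == 10 and SYNC_MARK_STEP == 3*HZ, the
		 * mark[]/mark_cnt[] ring stepped above keeps resync_mark
		 * roughly 27-30 seconds in the past, so the currspeed
		 * computed below averages over about the last half minute
		 * rather than over the whole resync.
		 */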
Xiao Niac7e50a2014-08-07 09:37:41 -04008947 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8948 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
NeilBrown57afd892005-06-21 17:17:13 -07008949 /((jiffies-mddev->resync_mark)/HZ +1) +1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008950
NeilBrown88202a02006-01-06 00:21:36 -08008951 if (currspeed > speed_min(mddev)) {
NeilBrownac8fa412015-02-19 16:55:00 +11008952 if (currspeed > speed_max(mddev)) {
NeilBrownc0e48522005-11-18 01:11:01 -08008953 msleep(500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008954 goto repeat;
8955 }
NeilBrownac8fa412015-02-19 16:55:00 +11008956 if (!is_mddev_idle(mddev, 0)) {
8957 /*
8958 * Give other IO more of a chance.
8959 * The faster the devices, the less we wait.
8960 */
8961 wait_event(mddev->recovery_wait,
8962 !atomic_read(&mddev->recovery_active));
8963 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008964 }
8965 }
NeilBrown9d487392016-11-02 14:16:49 +11008966 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8967 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8968 ? "interrupted" : "done");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008969 /*
8970 * this also signals 'finished resyncing' to md_stop
8971 */
majianpeng7c2c57c2012-07-03 12:12:26 +10008972 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008973 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8974
NeilBrown5ed1df22015-07-24 13:27:08 +10008975 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8976 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008977 mddev->curr_resync > 3) {
NeilBrown5ed1df22015-07-24 13:27:08 +10008978 mddev->curr_resync_completed = mddev->curr_resync;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008979 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown5ed1df22015-07-24 13:27:08 +10008980 }
NeilBrown09314792015-02-19 16:04:40 +11008981 mddev->pers->sync_request(mddev, max_sectors, &skipped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008982
NeilBrowndfc70642008-05-23 13:04:39 -07008983 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008984 mddev->curr_resync > 3) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008985 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8986 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8987 if (mddev->curr_resync >= mddev->recovery_cp) {
NeilBrown9d487392016-11-02 14:16:49 +11008988 pr_debug("md: checkpointing %s of %s.\n",
8989 desc, mdname(mddev));
majianpeng0a19caa2012-11-19 19:57:34 +08008990 if (test_bit(MD_RECOVERY_ERROR,
8991 &mddev->recovery))
8992 mddev->recovery_cp =
8993 mddev->curr_resync_completed;
8994 else
8995 mddev->recovery_cp =
8996 mddev->curr_resync;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008997 }
8998 } else
8999 mddev->recovery_cp = MaxSector;
9000 } else {
9001 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9002 mddev->curr_resync = MaxSector;
NeilBrowndb0505d2017-10-17 16:18:36 +11009003 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9004 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
9005 rcu_read_lock();
9006 rdev_for_each_rcu(rdev, mddev)
9007 if (rdev->raid_disk >= 0 &&
9008 mddev->delta_disks >= 0 &&
9009 !test_bit(Journal, &rdev->flags) &&
9010 !test_bit(Faulty, &rdev->flags) &&
9011 !test_bit(In_sync, &rdev->flags) &&
9012 rdev->recovery_offset < mddev->curr_resync)
9013 rdev->recovery_offset = mddev->curr_resync;
9014 rcu_read_unlock();
9015 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07009016 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009017 }
NeilBrowndb91ff52012-02-07 12:01:51 +11009018 skip:
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009019 /* set CHANGE_PENDING here since another update may still be needed,
9020 * so other nodes are informed. It should be harmless for normal
9021 * raid */
Shaohua Li29530792016-12-08 15:48:19 -08009022 set_mask_bits(&mddev->sb_flags, 0,
9023 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009024
BingJing Chang88763912018-02-22 13:34:46 +08009025 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9026 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9027 mddev->delta_disks > 0 &&
9028 mddev->pers->finish_reshape &&
9029 mddev->pers->size &&
9030 mddev->queue) {
9031 mddev_lock_nointr(mddev);
9032 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9033 mddev_unlock(mddev);
Christoph Hellwig2c247c52020-11-16 15:57:11 +01009034 if (!mddev_is_clustered(mddev))
9035 set_capacity_and_notify(mddev->gendisk,
9036 mddev->array_sectors);
BingJing Chang88763912018-02-22 13:34:46 +08009037 }
9038
NeilBrown23da4222014-12-15 12:57:01 +11009039 spin_lock(&mddev->lock);
NeilBrownc07b70a2009-12-14 12:49:48 +11009040 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9041 /* We completed so min/max setting can be forgotten if used. */
9042 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9043 mddev->resync_min = 0;
9044 mddev->resync_max = MaxSector;
9045 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9046 mddev->resync_min = mddev->curr_resync_completed;
NeilBrownf7851be2015-07-02 17:12:58 +10009047 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009048 mddev->curr_resync = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009049 spin_unlock(&mddev->lock);
9050
Linus Torvalds1da177e2005-04-16 15:20:36 -07009051 wake_up(&resync_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009052 md_wakeup_thread(mddev->thread);
NeilBrownc6207272008-02-06 01:39:52 -08009053 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009054}
NeilBrown29269552006-03-27 01:18:10 -08009055EXPORT_SYMBOL_GPL(md_do_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009056
NeilBrown746d3202013-04-24 11:42:41 +10009057static int remove_and_add_spares(struct mddev *mddev,
9058 struct md_rdev *this)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009059{
NeilBrown3cb03002011-10-11 16:45:26 +11009060 struct md_rdev *rdev;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009061 int spares = 0;
NeilBrownf2a371c2012-01-09 00:46:41 +11009062 int removed = 0;
NeilBrownd787be42016-06-02 16:19:53 +10009063 bool remove_some = false;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009064
NeilBrown39772f02018-02-03 09:19:30 +11009065 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9066 /* Mustn't remove devices when resync thread is running */
9067 return 0;
9068
NeilBrownd787be42016-06-02 16:19:53 +10009069 rdev_for_each(rdev, mddev) {
NeilBrown746d3202013-04-24 11:42:41 +10009070 if ((this == NULL || rdev == this) &&
9071 rdev->raid_disk >= 0 &&
Dan Williams6bfe0b42008-04-30 00:52:32 -07009072 !test_bit(Blocked, &rdev->flags) &&
NeilBrownd787be42016-06-02 16:19:53 +10009073 test_bit(Faulty, &rdev->flags) &&
9074 atomic_read(&rdev->nr_pending)==0) {
9075 /* Faulty non-Blocked devices with nr_pending == 0
9076 * never get nr_pending incremented,
9077 * never get Faulty cleared, and never get Blocked set.
9078 * So we can synchronize_rcu now rather than once per device
9079 */
9080 remove_some = true;
9081 set_bit(RemoveSynchronized, &rdev->flags);
9082 }
9083 }
9084
9085 if (remove_some)
9086 synchronize_rcu();
9087 rdev_for_each(rdev, mddev) {
9088 if ((this == NULL || rdev == this) &&
9089 rdev->raid_disk >= 0 &&
9090 !test_bit(Blocked, &rdev->flags) &&
9091 ((test_bit(RemoveSynchronized, &rdev->flags) ||
Shaohua Lif2076e72015-10-08 21:54:12 -07009092 (!test_bit(In_sync, &rdev->flags) &&
9093 !test_bit(Journal, &rdev->flags))) &&
NeilBrownd787be42016-06-02 16:19:53 +10009094 atomic_read(&rdev->nr_pending)==0)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009095 if (mddev->pers->hot_remove_disk(
NeilBrownb8321b62011-12-23 10:17:51 +11009096 mddev, rdev) == 0) {
Namhyung Kim36fad852011-07-27 11:00:36 +10009097 sysfs_unlink_rdev(mddev, rdev);
NeilBrown011abdc2018-04-26 14:46:29 +10009098 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009099 rdev->raid_disk = -1;
NeilBrownf2a371c2012-01-09 00:46:41 +11009100 removed++;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009101 }
9102 }
NeilBrownd787be42016-06-02 16:19:53 +10009103 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9104 clear_bit(RemoveSynchronized, &rdev->flags);
9105 }
9106
Jonathan Brassow90584fc2013-03-07 16:24:26 -06009107 if (removed && mddev->kobj.sd)
Junxiao Bie1a86db2020-07-14 16:10:26 -07009108 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009109
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009110 if (this && removed)
NeilBrown746d3202013-04-24 11:42:41 +10009111 goto no_add;
9112
NeilBrowndafb20f2012-03-19 12:46:39 +11009113 rdev_for_each(rdev, mddev) {
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009114 if (this && this != rdev)
9115 continue;
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009116 if (test_bit(Candidate, &rdev->flags))
9117 continue;
NeilBrown7bfec5f2011-12-23 10:17:53 +11009118 if (rdev->raid_disk >= 0 &&
9119 !test_bit(In_sync, &rdev->flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07009120 !test_bit(Journal, &rdev->flags) &&
NeilBrown7bfec5f2011-12-23 10:17:53 +11009121 !test_bit(Faulty, &rdev->flags))
9122 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009123 if (rdev->raid_disk >= 0)
9124 continue;
9125 if (test_bit(Faulty, &rdev->flags))
9126 continue;
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009127 if (!test_bit(Journal, &rdev->flags)) {
9128 if (mddev->ro &&
9129 ! (rdev->saved_raid_disk >= 0 &&
9130 !test_bit(Bitmap_sync, &rdev->flags)))
9131 continue;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009132
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009133 rdev->recovery_offset = 0;
9134 }
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02009135 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09009136 /* failure here is OK */
9137 sysfs_link_rdev(mddev, rdev);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009138 if (!test_bit(Journal, &rdev->flags))
9139 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009140 md_new_event(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08009141 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrowndfc70642008-05-23 13:04:39 -07009142 }
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009143 }
NeilBrown746d3202013-04-24 11:42:41 +10009144no_add:
NeilBrown6dafab62012-09-19 12:54:22 +10009145 if (removed)
Shaohua Li29530792016-12-08 15:48:19 -08009146 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009147 return spares;
9148}
NeilBrown7ebc0be2011-01-14 09:14:33 +11009149
NeilBrownac05f252014-09-30 08:10:42 +10009150static void md_start_sync(struct work_struct *ws)
9151{
9152 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009153
NeilBrownac05f252014-09-30 08:10:42 +10009154 mddev->sync_thread = md_register_thread(md_do_sync,
9155 mddev,
9156 "resync");
9157 if (!mddev->sync_thread) {
NeilBrown9d487392016-11-02 14:16:49 +11009158 pr_warn("%s: could not start resync thread...\n",
9159 mdname(mddev));
NeilBrownac05f252014-09-30 08:10:42 +10009160 /* leave the spares where they are, it shouldn't hurt */
9161 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9162 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9163 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9164 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9165 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009166 wake_up(&resync_wait);
NeilBrownac05f252014-09-30 08:10:42 +10009167 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9168 &mddev->recovery))
9169 if (mddev->sysfs_action)
9170 sysfs_notify_dirent_safe(mddev->sysfs_action);
9171 } else
9172 md_wakeup_thread(mddev->sync_thread);
9173 sysfs_notify_dirent_safe(mddev->sysfs_action);
9174 md_new_event(mddev);
9175}
9176
Linus Torvalds1da177e2005-04-16 15:20:36 -07009177/*
9178 * This routine is regularly called by all per-raid-array threads to
9179 * deal with generic issues like resync and super-block update.
9180 * Raid personalities that don't have a thread (linear/raid0) do not
9181 * need this as they never do any recovery or update the superblock.
9182 *
9183 * It does not do any resync itself, but rather "forks" off other threads
9184 * to do that as needed.
9185 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9186 * "->recovery" and create a thread at ->sync_thread.
NeilBrowndfc70642008-05-23 13:04:39 -07009187 * When the thread finishes it sets MD_RECOVERY_DONE
Linus Torvalds1da177e2005-04-16 15:20:36 -07009188 * and wakes up this thread, which will reap that thread and finish up.
9189 * This thread also removes any faulty devices (with nr_pending == 0).
9190 *
9191 * The overall approach is:
9192 * 1/ if the superblock needs updating, update it.
9193 * 2/ If a recovery thread is running, don't do anything else.
9194 * 3/ If recovery has finished, clean up, possibly marking spares active.
9195 * 4/ If there are any faulty devices, remove them.
9196 * 5/ If array is degraded, try to add spare devices
9197 * 6/ If array has spares or is not in-sync, start a resync thread.
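 *
 * Illustrative caller (a sketch; the real loops are the personality
 * threads, e.g. raid1d): each md_thread handler starts its iteration
 * with a call here:
 *
 *	static void example_thread(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *
 *		md_check_recovery(mddev);
 *		...service the personality's own pending work...
 *	}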
9198 */
NeilBrownfd01b882011-10-11 16:47:53 +11009199void md_check_recovery(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009200{
NeilBrown059421e2018-10-03 15:04:41 +10009201 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9202 /* Write superblock - thread that called mddev_suspend()
9203 * holds reconfig_mutex for us.
9204 */
9205 set_bit(MD_UPDATING_SB, &mddev->flags);
9206 smp_mb__after_atomic();
9207 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9208 md_update_sb(mddev, 0);
9209 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9210 wake_up(&mddev->sb_wait);
9211 }
9212
Jonathan Brassow68866e42011-06-08 15:10:08 +10009213 if (mddev->suspended)
9214 return;
9215
NeilBrown5f404022005-06-21 17:17:16 -07009216 if (mddev->bitmap)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009217 md_bitmap_daemon_work(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009218
NeilBrownfca4d842005-06-21 17:17:11 -07009219 if (signal_pending(current)) {
NeilBrown31a59e32008-04-30 00:52:30 -07009220 if (mddev->pers->sync_request && !mddev->external) {
NeilBrown9d487392016-11-02 14:16:49 +11009221 pr_debug("md: %s in immediate safe mode\n",
9222 mdname(mddev));
NeilBrownfca4d842005-06-21 17:17:11 -07009223 mddev->safemode = 2;
9224 }
9225 flush_signals(current);
9226 }
9227
NeilBrownc89a8ee2008-08-05 15:54:13 +10009228 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9229 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009230 if ( ! (
Shaohua Li29530792016-12-08 15:48:19 -08009231 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07009232 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
NeilBrownfca4d842005-06-21 17:17:11 -07009233 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;
		bool try_set_sync = mddev->safemode != 0;

		if (!mddev->external && mddev->safemode == 1)
			mddev->safemode = 0;

		if (mddev->ro) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)
				/* 'Blocked' flag not needed as failed devices
				 * will be recorded if array switched to read/write.
				 * Leaving it set will prevent the device
				 * from being removed.
				 */
				rdev_for_each(rdev, mddev)
					clear_bit(Blocked, &rdev->flags);
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			remove_and_add_spares(mddev, NULL);
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			goto unlock;
		}

		if (mddev_is_clustered(mddev)) {
			struct md_rdev *rdev;
			/* kick the device if another node issued a
			 * remove disk.
			 */
			rdev_for_each(rdev, mddev) {
				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
						rdev->raid_disk < 0)
					md_kick_rdev_from_array(rdev);
			}
		}

		if (try_set_sync && !mddev->external && !mddev->in_sync) {
			spin_lock(&mddev->lock);
			set_in_sync(mddev);
			spin_unlock(&mddev->lock);
		}

		if (mddev->sb_flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		mddev->curr_resync_completed = 0;
		spin_lock(&mddev->lock);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		spin_unlock(&mddev->lock);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto not_running;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto not_running;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto not_running;

		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				md_bitmap_write_all(mddev->bitmap);
			}
			INIT_WORK(&mddev->del_work, md_start_sync);
			queue_work(md_misc_wq, &mddev->del_work);
			goto unlock;
		}
	not_running:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			wake_up(&resync_wait);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
	unlock:
		wake_up(&mddev->sb_wait);
		mddev_unlock(mddev);
	}
}
EXPORT_SYMBOL(md_check_recovery);

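/*
 * md_reap_sync_thread() - collect the result of a finished resync, recovery
 * or reshape thread, activate spares on success, write the superblocks out
 * and clear the MD_RECOVERY_* state so a new action can be scheduled.
 * Called with the mddev reconfig lock held (as md_check_recovery() above
 * does via mddev_trylock()).
 */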
void md_reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;
	sector_t old_dev_sectors = mddev->dev_sectors;
	bool is_reshaped = false;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    mddev->degraded != mddev->raid_disks) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev)) {
			sysfs_notify_dirent_safe(mddev->sysfs_degraded);
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape) {
		mddev->pers->finish_reshape(mddev);
		if (mddev_is_clustered(mddev))
			is_reshaped = true;
	}

	/* If the array is no longer degraded, then any saved_raid_disk
	 * information must be scrapped.
	 */
	if (!mddev->degraded)
		rdev_for_each(rdev, mddev)
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
	 * clustered raid */
	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
		md_cluster_ops->resync_finish(mddev);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	/*
	 * We call md_cluster_ops->update_size here because sync_size could
	 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
	 * so it is time to update size across cluster.
	 */
	if (mddev_is_clustered(mddev) && is_reshaped
				      && !test_bit(MD_CLOSING, &mddev->flags))
		md_cluster_ops->update_size(mddev, old_dev_sectors);
	wake_up(&resync_wait);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

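/*
 * Wait (for up to five seconds) for @rdev to leave the Blocked /
 * BlockedBadBlocks state, then drop the pending reference the caller
 * took on it.
 */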
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

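/*
 * Fold a reshape's data_offset change back into each rdev: adjust the
 * usable size by the delta between the old and new offsets and commit
 * the new offset.
 */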
void md_finish_reshape(struct mddev *mddev)
{
	/* called by the personality module when a reshape completes. */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management */

/* Record a range of bad blocks on @rdev.
 * Returns 1 on success, 0 on failure.
 */
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	struct mddev *mddev = rdev->mddev;
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
	if (rv == 0) {
		/* Make sure they get written out promptly */
		if (test_bit(ExternalBbl, &rdev->flags))
			sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(rdev->mddev->thread);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

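/*
 * Clear (part of) a previously recorded bad range, e.g. once it has been
 * successfully rewritten.  A minimal usage sketch for this pair (the
 * caller and 'sector'/'len' are hypothetical; personalities differ):
 *
 *	if (!rdev_set_badblocks(rdev, sector, len, 0))
 *		md_error(mddev, rdev);
 *	... later, once the range is known to be good again ...
 *	rdev_clear_badblocks(rdev, sector, len, 0);
 *
 * Returns the badblocks_clear() result: 0 on success.
 */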
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_clear(&rdev->badblocks, s, sectors);
	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
		sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

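/*
 * Reboot notifier: on shutdown or reboot, stop writes on every array we
 * can lock and put persistent arrays into safemode so their superblocks
 * are marked clean, then delay briefly (see the comment in the body).
 */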
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
}

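/*
 * Module initialisation: create the "md", "md_misc" and "md_rdev_misc"
 * workqueues, register the "md" and "mdp" block majors, and hook up the
 * reboot notifier, sysctl table and /proc/mdstat.  Each error label
 * unwinds exactly what was set up before it.
 */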
static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
	if (!md_rdev_misc_wq)
		goto err_rdev_misc_wq;

	ret = __register_blkdev(MD_MAJOR, "md", md_probe);
	if (ret < 0)
		goto err_md;

	ret = __register_blkdev(0, "mdp", md_probe);
	if (ret < 0)
		goto err_mdp;
	mdp_major = ret;

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_rdev_misc_wq);
err_rdev_misc_wq:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

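/*
 * Apply superblock changes made by another cluster node: resize the
 * array if its size changed, pick up device role changes (activation,
 * failure, removal of Candidate devices), and follow a remotely driven
 * reshape.
 */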
static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	struct md_rdev *rdev2;
	int role, ret;
	char b[BDEVNAME_SIZE];

	/*
	 * If size is changed in another node then we need to
	 * do resize as well.
	 */
	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
		if (ret)
			pr_info("md-cluster: resize failed\n");
		else
			md_bitmap_update_sb(mddev->bitmap);
	}

	/* Check for change of roles in the active devices */
	rdev_for_each(rdev2, mddev) {
		if (test_bit(Faulty, &rdev2->flags))
			continue;

		/* Check if the roles changed */
		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);

		if (test_bit(Candidate, &rdev2->flags)) {
			if (role == 0xfffe) {
				pr_info("md: Removing Candidate device %s because add failed\n",
					bdevname(rdev2->bdev, b));
				md_kick_rdev_from_array(rdev2);
				continue;
			} else
				clear_bit(Candidate, &rdev2->flags);
		}

		if (role != rdev2->raid_disk) {
			/*
			 * The device just got activated, unless a reshape
			 * is currently happening.
			 */
			if (rdev2->raid_disk == -1 && role != 0xffff &&
			    !(le32_to_cpu(sb->feature_map) &
			      MD_FEATURE_RESHAPE_ACTIVE)) {
				rdev2->saved_raid_disk = role;
				ret = remove_and_add_spares(mddev, rdev2);
				pr_info("Activated spare: %s\n",
					bdevname(rdev2->bdev, b));
				/* Wake up mddev->thread so the array can
				 * resync onto the newly activated disk. */
				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
				md_wakeup_thread(mddev->thread);
			}
			/* device faulty
			 * We just want to do the minimum to mark the disk
			 * as faulty. The recovery is performed by the
			 * one who initiated the error.
			 */
			if ((role == 0xfffe) || (role == 0xfffd)) {
				md_error(mddev, rdev2);
				clear_bit(Blocked, &rdev2->flags);
			}
		}
	}

	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));

	/*
	 * Since mddev->delta_disks has already been updated in
	 * update_raid_disks, it is now time to check the reshape.
	 */
	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/*
		 * reshape is happening in the remote node, we need to
		 * update reshape_position and call start_reshape.
		 */
		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
		if (mddev->pers->start_reshape)
			mddev->pers->start_reshape(mddev);
	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
		   mddev->reshape_position != MaxSector &&
		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/* reshape was just finished in another node. */
		mddev->reshape_position = MaxSector;
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
	}

	/* Finally set the event to be up to date */
	mddev->events = le64_to_cpu(sb->events);
}

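/*
 * Re-read the superblock of @rdev from disk into a freshly allocated
 * page; on failure the old page is restored and the error returned.
 */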
static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	int err;
	struct page *swapout = rdev->sb_page;
	struct mdp_superblock_1 *sb;

	/* Store the sb page of the rdev in the swapout temporary
	 * variable in case we err in the future
	 */
	rdev->sb_page = NULL;
	err = alloc_disk_sb(rdev);
	if (err == 0) {
		ClearPageUptodate(rdev->sb_page);
		rdev->sb_loaded = 0;
		err = super_types[mddev->major_version].
			load_super(rdev, NULL, mddev->minor_version);
	}
	if (err < 0) {
		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
			__func__, __LINE__, rdev->desc_nr, err);
		if (rdev->sb_page)
			put_page(rdev->sb_page);
		rdev->sb_page = swapout;
		rdev->sb_loaded = 1;
		return err;
	}

	sb = page_address(rdev->sb_page);

	/* Update recovery_offset only if the superblock actually carries
	 * one, i.e. MD_FEATURE_RECOVERY_OFFSET is set.
	 */
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

	/* The other node finished recovery, call spare_active to set
	 * device In_sync and mddev->degraded
	 */
	if (rdev->recovery_offset == MaxSector &&
	    !test_bit(In_sync, &rdev->flags) &&
	    mddev->pers->spare_active(mddev))
		sysfs_notify_dirent_safe(mddev->sysfs_degraded);

	put_page(swapout);
	return 0;
}

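/*
 * Called by md-cluster when another node has updated the metadata:
 * re-read the superblock of device number @nr and fold the changes
 * into this node's view of the array.
 */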
void md_reload_sb(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;
	int err;

	/* Find the rdev */
	rdev_for_each_rcu(rdev, mddev) {
		if (rdev->desc_nr == nr)
			break;
	}

	if (!rdev || rdev->desc_nr != nr) {
		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
		return;
	}

	err = read_rdev(mddev, rdev);
	if (err < 0)
		return;

	check_sb_changes(mddev, rdev);

	/* Read all rdevs to update recovery_offset */
	rdev_for_each_rcu(rdev, mddev) {
		if (!test_bit(Faulty, &rdev->flags))
			read_rdev(mddev, rdev);
	}
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

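/*
 * Remember a partition the partition code flagged for RAID autodetect,
 * so md_autostart_arrays() can try to assemble it at boot.
 */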
void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		mutex_lock(&detected_devices_mutex);
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
		mutex_unlock(&detected_devices_mutex);
	}
}

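/*
 * Import every device queued by md_autodetect_dev() and hand the
 * survivors to autorun_devices() for assembly.
 */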
void md_autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	pr_info("md: Autodetecting RAID arrays.\n");

	mutex_lock(&detected_devices_mutex);
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		mutex_unlock(&detected_devices_mutex);
		rdev = md_import_device(dev, 0, 90);
		mutex_lock(&detected_devices_mutex);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}
	mutex_unlock(&detected_devices_mutex);

	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

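/*
 * Module teardown: unregister the majors and notifiers, wake up any
 * pollers of /proc/mdstat so they let go of it, tear down each mddev,
 * then destroy the workqueues once their destruction work has run.
 */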
static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;
	int delay = 1;

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the modules while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
		delay += delay;
	}
	remove_proc_entry("mdstat", NULL);

	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->ctime = 0;
		mddev->hold_active = 0;
		/*
		 * for_each_mddev() will call mddev_put() at the end of each
		 * iteration.  As the mddev is now fully clear, this will
		 * schedule the mddev for destruction by a workqueue, and the
		 * destroy_workqueue() below will wait for that to complete.
		 */
	}
	destroy_workqueue(md_rdev_misc_wq);
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

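/*
 * "start_ro" makes newly assembled arrays start read-only
 * ("auto-read-only") until the first write arrives.  Illustrative
 * usage (shell, not part of this file):
 *
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 */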
static int get_ro(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%d\n", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);