// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

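/*
 * The helpers below manage the write-serialization bookkeeping used when
 * write-behind is active on a write-mostly, multi-queue rdev: each such
 * rdev carries an array of serial_in_rdev buckets, each with its own lock,
 * rbtree of in-flight writes and waitqueue.
 */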
static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * An rdev needs the serialization machinery enabled if it meets both
 * conditions:
 * 1. it is a multi-queue device flagged write-mostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resources for the rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device for which rdev_need_serial() returns true.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			      bool is_suspend)
{
	int ret = 0;

	if (rdev && !rdev_need_serial(rdev) &&
	    !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (!is_suspend)
		mddev_suspend(mddev);

	if (!rdev)
		ret = rdevs_init_serial(mddev);
	else
		ret = rdev_init_serial(rdev);
	if (ret)
		goto abort;

	if (mddev->serial_info_pool == NULL) {
		/*
		 * already in memalloc noio context by
		 * mddev_suspend()
		 */
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		if (!mddev->serial_info_pool) {
			rdevs_uninit_serial(mddev);
			pr_err("can't alloc memory pool for serialization\n");
		}
	}

abort:
	if (!is_suspend)
		mddev_resume(mddev);
}

/*
 * Free resources from the rdev(s), and destroy serial_info_pool under these
 * conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. the bitmap is destroyed while the policy is not enabled.
 * 3. for the disable-policy case, the pool is destroyed only when no rdev
 *    needs it any more.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			       bool is_suspend)
{
	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0; /* used to track if other rdevs need the pool */

		if (!is_suspend)
			mddev_suspend(mddev);
		rdev_for_each(temp, mddev) {
			if (!rdev) {
				if (!mddev->serialize_policy ||
				    !rdev_need_serial(temp))
					rdev_uninit_serial(temp);
				else
					num++;
			} else if (temp != rdev &&
				   test_bit(CollisionCheck, &temp->flags))
				num++;
		}

		if (rdev)
			rdev_uninit_serial(rdev);

		if (num)
			pr_info("The mempool could be used by other devices\n");
		else {
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
		}
		if (!is_suspend)
			mddev_resume(mddev);
	}
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it.  This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->bio_set))
		return bio_alloc(gfp_mask, nr_iovecs);

	return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->sync_set))
		return bio_alloc(GFP_NOIO, 1);

	return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
}

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

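/*
 * Per-bio context used by md_submit_bio()/md_end_io() to account I/O
 * statistics for the array while preserving the bio's original
 * completion callback and private data.
 */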
struct md_io {
	struct mddev *mddev;
	bio_end_io_t *orig_bi_end_io;
	void *orig_bi_private;
	unsigned long start_time;
	struct block_device *part;
};

static void md_end_io(struct bio *bio)
{
	struct md_io *md_io = bio->bi_private;
	struct mddev *mddev = md_io->mddev;

	part_end_io_acct(md_io->part, bio, md_io->start_time);

	bio->bi_end_io = md_io->orig_bi_end_io;
	bio->bi_private = md_io->orig_bi_private;

	mempool_free(md_io, &mddev->md_io_pool);

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}

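/* submit_bio entry point for bios targeted at an md array's gendisk. */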
static blk_qc_t md_submit_bio(struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = bio->bi_disk->private_data;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(&bio);

	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (bio->bi_end_io != md_end_io) {
		struct md_io *md_io;

		md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
		md_io->mddev = mddev;
		md_io->orig_bi_end_io = bio->bi_end_io;
		md_io->orig_bi_private = bio->bi_private;

		bio->bi_end_io = md_end_io;
		bio->bi_private = md_io;

		md_io->start_time = part_start_io_acct(mddev->gendisk,
						       &md_io->part, bio);
	}

	/* the bio may become mergeable after it is passed to the lower layer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory-reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	/* leave the memalloc_noio scope entered in mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

/*
 * Generic flush handling for md
 */

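/*
 * Completion handler for the pre-flush bios submit_flushes() sends to each
 * active rdev: drop the rdev reference and queue flush_work once the last
 * outstanding flush has finished.
 */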
static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to avoid
	 * a deadlock: other bios that have passed the md_handle_request
	 * suspend check could wait for this one, while the md_handle_request
	 * call below could in turn wait for those bios because of the
	 * suspend check.
	 */
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}

/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
 * being finished in another context.  Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t req_start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	/* flush requests wait until ongoing flush completes,
	 * hence coalescing all the pending requests.
	 */
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_before(req_start, mddev->prev_flush_start),
			    mddev->lock);
	/* new request after previous flush is completed */
	if (ktime_after(req_start, mddev->prev_flush_start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
	}
	spin_unlock_irq(&mddev->lock);

	if (!bio) {
		INIT_WORK(&mddev->flush_work, submit_flushes);
		queue_work(md_wq, &mddev->flush_work);
	} else {
		/* flush was performed for some other bio while we waited. */
		if (bio->bi_iter.bi_size == 0)
			/* an empty barrier - all done */
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(md_flush_request);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

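/*
 * Drop a reference to @mddev; if it was the last one and the array is
 * completely unconfigured and not held active, schedule its deletion.
 */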
static void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	kobject_init(&mddev->kobj, &md_ktype);
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

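/*
 * Return the mddev for @unit, allocating and registering a new one if none
 * exists yet.  With unit == 0 an unused minor (starting at 512) is chosen
 * for the new array.  Returns NULL if allocation fails or no minor number
 * is free.
 */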
static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So keep sysfs_active set while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

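/* Look up a registered personality, matching either by level or by name. */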
static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

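/* Completion handler for the superblock writes issued by md_super_write(). */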
static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: %s gets error=%d\n", __func__,
		       blk_status_to_errno(bio->bi_status));
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = md_bio_alloc_sync(mddev);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
	int ret;

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(bio, rdev->meta_bdev);
	else
		bio_set_dev(bio, rdev->bdev);
	bio_set_op_attrs(bio, op, op_flags);
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(bio);

	ret = !bio->bi_status;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

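/* Read the superblock into rdev->sb_page; a no-op if it is already loaded. */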
static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

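/* Compare the array UUIDs of two 0.90 superblocks. */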
static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

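/* Fold a 32-bit checksum down to 16 bits. */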
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

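/*
 * Checksum the 0.90 superblock, treating the sb_csum field itself as zero,
 * then restore sb->sb_csum (folded on Alpha to match the old csum_partial
 * behaviour) and return the computed sum.
 */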
NeilBrownf72ffdd2014-09-30 14:23:59 +10001119static unsigned int calc_sb_csum(mdp_super_t *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120{
NeilBrown4d167f02007-05-09 02:35:37 -07001121 u64 newcsum = 0;
1122 u32 *sb32 = (u32*)sb;
1123 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124 unsigned int disk_csum, csum;
1125
1126 disk_csum = sb->sb_csum;
1127 sb->sb_csum = 0;
NeilBrown4d167f02007-05-09 02:35:37 -07001128
1129 for (i = 0; i < MD_SB_BYTES/4 ; i++)
1130 newcsum += sb32[i];
1131 csum = (newcsum & 0xffffffff) + (newcsum>>32);
1132
NeilBrown4d167f02007-05-09 02:35:37 -07001133#ifdef CONFIG_ALPHA
1134 /* This used to use csum_partial, which was wrong for several
1135 * reasons including that different results are returned on
1136 * different architectures. It isn't critical that we get exactly
1137 * the same return value as before (we always csum_fold before
1138 * testing, and that removes any differences). However as we
1139 * know that csum_partial always returned a 16bit value on
1140 * alphas, do a fold to maximise conformity to previous behaviour.
1141 */
1142 sb->sb_csum = md_csum_fold(disk_csum);
1143#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 sb->sb_csum = disk_csum;
NeilBrown4d167f02007-05-09 02:35:37 -07001145#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146 return csum;
1147}
1148
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149/*
1150 * Handle superblock details.
1151 * We want to be able to handle multiple superblock formats
1152 * so we have a common interface to them all, and an array of
1153 * different handlers.
1154 * We rely on user-space to write the initial superblock, and support
1155 * reading and updating of superblocks.
1156 * Interface methods are:
NeilBrown3cb03002011-10-11 16:45:26 +11001157 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 * loads and validates a superblock on dev.
1159 * if refdev != NULL, compare superblocks on both devices
1160 * Return:
1161 * 0 - dev has a superblock that is compatible with refdev
1162 * 1 - dev has a superblock that is compatible and newer than refdev
1163 * so dev should be used as the refdev in future
1164 * -EINVAL superblock incompatible or invalid
1165 * -othererror e.g. -EIO
1166 *
NeilBrownfd01b882011-10-11 16:47:53 +11001167 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 * Verify that dev is acceptable into mddev.
1169 * The first time, mddev->raid_disks will be 0, and data from
1170 * dev should be merged in. Subsequent calls check that dev
1171 * is new enough. Return 0 or -EINVAL
1172 *
NeilBrownfd01b882011-10-11 16:47:53 +11001173 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 * Update the superblock for rdev with data in mddev
1175 * This does not write to disc.
1176 *
1177 */
1178
1179struct super_type {
Chris Webb0cd17fe2008-06-28 08:31:46 +10001180 char *name;
1181 struct module *owner;
NeilBrownc6563a82012-05-21 09:27:00 +10001182 int (*load_super)(struct md_rdev *rdev,
1183 struct md_rdev *refdev,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001184 int minor_version);
NeilBrownc6563a82012-05-21 09:27:00 +10001185 int (*validate_super)(struct mddev *mddev,
1186 struct md_rdev *rdev);
1187 void (*sync_super)(struct mddev *mddev,
1188 struct md_rdev *rdev);
NeilBrown3cb03002011-10-11 16:45:26 +11001189 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
Andre Noll15f4a5f2008-07-21 14:42:12 +10001190 sector_t num_sectors);
NeilBrownc6563a82012-05-21 09:27:00 +10001191 int (*allow_new_offset)(struct md_rdev *rdev,
1192 unsigned long long new_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193};
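/*
 * For illustration, the assembly code elsewhere in this file is expected
 * to drive this table roughly as follows (variable names in this sketch
 * are only illustrative):
 *
 *	err = super_types[ver].load_super(rdev, refdev, minor_version);
 *	switch (err) {
 *	case 1:			// rdev is newer than refdev
 *		refdev = rdev;	// use it as the reference from now on
 *		break;
 *	case 0:			// compatible with refdev, nothing to do
 *		break;
 *	default:		// -EINVAL or -EIO: drop rdev from the set
 *		break;
 *	}
 *
 * sync_super() below dispatches through the same table whenever a
 * superblock needs to be rewritten.
 */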
1194
1195/*
Andre Noll0894cc32009-06-18 08:49:23 +10001196 * Check that the given mddev has no bitmap.
1197 *
1198 * This function is called from the run method of all personalities that do not
1199 * support bitmaps. It prints an error message and returns non-zero if mddev
1200 * has a bitmap. Otherwise, it returns 0.
1201 *
1202 */
NeilBrownfd01b882011-10-11 16:47:53 +11001203int md_check_no_bitmap(struct mddev *mddev)
Andre Noll0894cc32009-06-18 08:49:23 +10001204{
NeilBrownc3d97142009-12-14 12:49:52 +11001205 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
Andre Noll0894cc32009-06-18 08:49:23 +10001206 return 0;
NeilBrown9d487392016-11-02 14:16:49 +11001207 pr_warn("%s: bitmaps are not supported for %s\n",
Andre Noll0894cc32009-06-18 08:49:23 +10001208 mdname(mddev), mddev->pers->name);
1209 return 1;
1210}
1211EXPORT_SYMBOL(md_check_no_bitmap);
1212
1213/*
NeilBrownf72ffdd2014-09-30 14:23:59 +10001214 * load_super for 0.90.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 */
NeilBrown3cb03002011-10-11 16:45:26 +11001216static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217{
1218 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1219 mdp_super_t *sb;
1220 int ret;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001221 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
1223 /*
Andre Noll0f420352008-07-11 22:02:23 +10001224 * Calculate the position of the superblock (512-byte sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 * it's at the end of the disk.
1226 *
1227 * It also happens to be a multiple of 4Kb.
1228 */
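	/*
	 * calc_dev_sboffset() performs that calculation; assuming the
	 * traditional 0.90 layout, the superblock occupies the last
	 * 64KiB-aligned 64KiB block of the device, so for a hypothetical
	 * 976773168-sector device it would land at sector
	 * (976773168 & ~127) - 128 = 976772992.
	 */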
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001229 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230
NeilBrown0002b272005-09-09 16:23:53 -07001231 ret = read_disk_sb(rdev, MD_SB_BYTES);
NeilBrown9d487392016-11-02 14:16:49 +11001232 if (ret)
1233 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
1235 ret = -EINVAL;
1236
1237 bdevname(rdev->bdev, b);
Namhyung Kim65a06f062011-07-27 11:00:36 +10001238 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239
1240 if (sb->md_magic != MD_SB_MAGIC) {
NeilBrown9d487392016-11-02 14:16:49 +11001241 pr_warn("md: invalid raid superblock magic on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 goto abort;
1243 }
1244
1245 if (sb->major_version != 0 ||
NeilBrownf6705572006-03-27 01:18:11 -08001246 sb->minor_version < 90 ||
1247 sb->minor_version > 91) {
NeilBrown9d487392016-11-02 14:16:49 +11001248 pr_warn("Bad version number %d.%d on %s\n",
1249 sb->major_version, sb->minor_version, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 goto abort;
1251 }
1252
1253 if (sb->raid_disks <= 0)
1254 goto abort;
1255
NeilBrown4d167f02007-05-09 02:35:37 -07001256 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
NeilBrown9d487392016-11-02 14:16:49 +11001257 pr_warn("md: invalid superblock checksum on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 goto abort;
1259 }
1260
1261 rdev->preferred_minor = sb->md_minor;
1262 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10001263 rdev->new_data_offset = 0;
NeilBrown0002b272005-09-09 16:23:53 -07001264 rdev->sb_size = MD_SB_BYTES;
NeilBrown9f2f3832011-07-28 11:31:47 +10001265 rdev->badblocks.shift = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266
1267 if (sb->level == LEVEL_MULTIPATH)
1268 rdev->desc_nr = -1;
1269 else
1270 rdev->desc_nr = sb->this_disk.number;
1271
Yufen Yu228fc7d2019-10-30 18:47:02 +08001272 /* not a spare disk, or LEVEL_MULTIPATH */
1273 if (sb->level == LEVEL_MULTIPATH ||
1274 (rdev->desc_nr >= 0 &&
Yufen Yu3b7436c2019-12-10 15:01:29 +08001275 rdev->desc_nr < MD_SB_DISKS &&
Yufen Yu228fc7d2019-10-30 18:47:02 +08001276 sb->disks[rdev->desc_nr].state &
1277 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1278 spare_disk = false;
1279
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001280 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001281 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001282 ret = 1;
1283 else
1284 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001285 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001287 mdp_super_t *refsb = page_address(refdev->sb_page);
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001288 if (!md_uuid_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001289 pr_warn("md: %s has different UUID to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 b, bdevname(refdev->bdev,b2));
1291 goto abort;
1292 }
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001293 if (!md_sb_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001294 pr_warn("md: %s has same UUID but different superblock to %s\n",
1295 b, bdevname(refdev->bdev, b2));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 goto abort;
1297 }
1298 ev1 = md_event(sb);
1299 ev2 = md_event(refsb);
Yufen Yu6a5cb532019-10-16 16:00:03 +08001300
Yufen Yu228fc7d2019-10-30 18:47:02 +08001301 if (!spare_disk && ev1 > ev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 ret = 1;
NeilBrownf72ffdd2014-09-30 14:23:59 +10001303 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 ret = 0;
1305 }
NeilBrown8190e752009-06-18 08:48:58 +10001306 rdev->sectors = rdev->sb_start;
NeilBrown667a5312012-08-16 16:46:12 +10001307 /* Limit to 4TB as metadata cannot record more than that.
1308 * (not needed for Linear and RAID0 as metadata doesn't
1309 * record this size)
1310 */
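	/*
	 * (The 0.90 superblock stores the size in a 32-bit field counting
	 * 1KiB units, so the largest representable size is (2^32 - 1) KiB,
	 * i.e. 2^33 - 2 sectors - exactly the (2ULL << 32) - 2 used below.)
	 */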
Christoph Hellwig72deb452019-04-05 18:08:59 +02001311 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001312 rdev->sectors = (sector_t)(2ULL << 32) - 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
NeilBrown27a7b262011-09-10 17:21:28 +10001314 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
NeilBrown2bf071b2006-01-06 00:20:55 -08001315 /* "this cannot possibly happen" ... */
1316 ret = -EINVAL;
1317
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 abort:
1319 return ret;
1320}
1321
1322/*
1323 * validate_super for 0.90.0
1324 */
NeilBrownfd01b882011-10-11 16:47:53 +11001325static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326{
1327 mdp_disk_t *desc;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001328 mdp_super_t *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001329 __u64 ev1 = md_event(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330
NeilBrown41158c72005-06-21 17:17:25 -07001331 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001332 clear_bit(Faulty, &rdev->flags);
1333 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001334 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001335 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001336
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 if (mddev->raid_disks == 0) {
1338 mddev->major_version = 0;
1339 mddev->minor_version = sb->minor_version;
1340 mddev->patch_version = sb->patch_version;
NeilBrowne6910632008-02-06 01:39:51 -08001341 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001342 mddev->chunk_sectors = sb->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 mddev->ctime = sb->ctime;
1344 mddev->utime = sb->utime;
1345 mddev->level = sb->level;
NeilBrownd9d166c2006-01-06 00:20:51 -08001346 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 mddev->layout = sb->layout;
1348 mddev->raid_disks = sb->raid_disks;
NeilBrown27a7b262011-09-10 17:21:28 +10001349 mddev->dev_sectors = ((sector_t)sb->size) * 2;
NeilBrown07d84d102006-06-26 00:27:56 -07001350 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001351 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001352 mddev->bitmap_info.space = 0;
1353 /* bitmap can use 60K after the 4K superblock */
NeilBrownc3d97142009-12-14 12:49:52 +11001354 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001355 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrown2c810cd2012-05-21 09:27:00 +10001356 mddev->reshape_backwards = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
NeilBrownf6705572006-03-27 01:18:11 -08001358 if (mddev->minor_version >= 91) {
1359 mddev->reshape_position = sb->reshape_position;
1360 mddev->delta_disks = sb->delta_disks;
1361 mddev->new_level = sb->new_level;
1362 mddev->new_layout = sb->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001363 mddev->new_chunk_sectors = sb->new_chunk >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001364 if (mddev->delta_disks < 0)
1365 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001366 } else {
1367 mddev->reshape_position = MaxSector;
1368 mddev->delta_disks = 0;
1369 mddev->new_level = mddev->level;
1370 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001371 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001372 }
NeilBrown33f2c352019-09-09 16:52:29 +10001373 if (mddev->level == 0)
1374 mddev->layout = -1;
NeilBrownf6705572006-03-27 01:18:11 -08001375
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 if (sb->state & (1<<MD_SB_CLEAN))
1377 mddev->recovery_cp = MaxSector;
1378 else {
NeilBrownf72ffdd2014-09-30 14:23:59 +10001379 if (sb->events_hi == sb->cp_events_hi &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380 sb->events_lo == sb->cp_events_lo) {
1381 mddev->recovery_cp = sb->recovery_cp;
1382 } else
1383 mddev->recovery_cp = 0;
1384 }
1385
1386 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1387 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1388 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1389 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1390
1391 mddev->max_disks = MD_SB_DISKS;
NeilBrowna654b9d82005-06-21 17:17:27 -07001392
1393 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001394 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001395 mddev->bitmap_info.offset =
1396 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10001397 mddev->bitmap_info.space =
Dave Jonesc9ad0202013-08-19 22:26:32 -04001398 mddev->bitmap_info.default_space;
NeilBrown6409bb02012-05-22 13:55:07 +10001399 }
NeilBrowna654b9d82005-06-21 17:17:27 -07001400
NeilBrown41158c72005-06-21 17:17:25 -07001401 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001402 /* Insist on a good event counter while assembling, except
1403 * for spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001405 if (sb->disks[rdev->desc_nr].state & (
1406 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
NeilBrownf72ffdd2014-09-30 14:23:59 +10001407 if (ev1 < mddev->events)
NeilBrownbe6800a2010-05-18 10:17:09 +10001408 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001409 } else if (mddev->bitmap) {
1410 /* if adding to array with a bitmap, then we can accept an
1411 * older device ... but not too old.
1412 */
NeilBrown41158c72005-06-21 17:17:25 -07001413 if (ev1 < mddev->bitmap->events_cleared)
1414 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001415 if (ev1 < mddev->events)
1416 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001417 } else {
1418 if (ev1 < mddev->events)
1419 /* just a hot-add of a new device, leave raid_disk at -1 */
1420 return 0;
1421 }
NeilBrown41158c72005-06-21 17:17:25 -07001422
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 if (mddev->level != LEVEL_MULTIPATH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 desc = sb->disks + rdev->desc_nr;
1425
1426 if (desc->state & (1<<MD_DISK_FAULTY))
NeilBrownb2d444d2005-11-08 21:39:31 -08001427 set_bit(Faulty, &rdev->flags);
NeilBrown7c7546c2006-06-26 00:27:41 -07001428 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1429 desc->raid_disk < mddev->raid_disks */) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001430 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 rdev->raid_disk = desc->raid_disk;
NeilBrownf4667222013-12-09 12:04:56 +11001432 rdev->saved_raid_disk = desc->raid_disk;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001433 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1434 /* active but not in sync implies recovery up to
1435 * reshape position. We don't know exactly where
1436 * that is, so set to zero for now */
1437 if (mddev->minor_version >= 91) {
1438 rdev->recovery_offset = 0;
1439 rdev->raid_disk = desc->raid_disk;
1440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001442 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1443 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001444 if (desc->state & (1<<MD_DISK_FAILFAST))
1445 set_bit(FailFast, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001446 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001447 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 return 0;
1449}
1450
1451/*
1452 * sync_super for 0.90.0
1453 */
NeilBrownfd01b882011-10-11 16:47:53 +11001454static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455{
1456 mdp_super_t *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001457 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 int next_spare = mddev->raid_disks;
NeilBrown19133a42005-11-08 21:39:35 -08001459
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 /* make rdev->sb match mddev data..
1461 *
1462 * 1/ zero out disks
1463 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1464 * 3/ any empty disks < next_spare become removed
1465 *
1466 * disks[0] gets initialised to REMOVED because
1467 * we cannot be sure from other fields if it has
1468 * been initialised or not.
1469 */
1470 int i;
1471 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1472
NeilBrown61181562005-09-09 16:24:02 -07001473 rdev->sb_size = MD_SB_BYTES;
1474
Namhyung Kim65a06f062011-07-27 11:00:36 +10001475 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476
1477 memset(sb, 0, sizeof(*sb));
1478
1479 sb->md_magic = MD_SB_MAGIC;
1480 sb->major_version = mddev->major_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 sb->patch_version = mddev->patch_version;
1482 sb->gvalid_words = 0; /* ignored */
1483 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1484 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1485 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1486 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1487
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001488 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 sb->level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11001490 sb->size = mddev->dev_sectors / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 sb->raid_disks = mddev->raid_disks;
1492 sb->md_minor = mddev->md_minor;
NeilBrowne6910632008-02-06 01:39:51 -08001493 sb->not_persistent = 0;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001494 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 sb->state = 0;
1496 sb->events_hi = (mddev->events>>32);
1497 sb->events_lo = (u32)mddev->events;
1498
NeilBrownf6705572006-03-27 01:18:11 -08001499 if (mddev->reshape_position == MaxSector)
1500 sb->minor_version = 90;
1501 else {
1502 sb->minor_version = 91;
1503 sb->reshape_position = mddev->reshape_position;
1504 sb->new_level = mddev->new_level;
1505 sb->delta_disks = mddev->delta_disks;
1506 sb->new_layout = mddev->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001507 sb->new_chunk = mddev->new_chunk_sectors << 9;
NeilBrownf6705572006-03-27 01:18:11 -08001508 }
1509 mddev->minor_version = sb->minor_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 if (mddev->in_sync)
1511 {
1512 sb->recovery_cp = mddev->recovery_cp;
1513 sb->cp_events_hi = (mddev->events>>32);
1514 sb->cp_events_lo = (u32)mddev->events;
1515 if (mddev->recovery_cp == MaxSector)
1516 sb->state = (1<< MD_SB_CLEAN);
1517 } else
1518 sb->recovery_cp = 0;
1519
1520 sb->layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10001521 sb->chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
NeilBrownc3d97142009-12-14 12:49:52 +11001523 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
NeilBrowna654b9d82005-06-21 17:17:27 -07001524 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1525
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 sb->disks[0].state = (1<<MD_DISK_REMOVED);
NeilBrowndafb20f2012-03-19 12:46:39 +11001527 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 mdp_disk_t *d;
NeilBrown86e6ffd2005-11-08 21:39:24 -08001529 int desc_nr;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001530 int is_active = test_bit(In_sync, &rdev2->flags);
1531
1532 if (rdev2->raid_disk >= 0 &&
1533 sb->minor_version >= 91)
1534 /* we have nowhere to store the recovery_offset,
1535 * but if it is not below the reshape_position,
1536 * we can piggy-back on that.
1537 */
1538 is_active = 1;
1539 if (rdev2->raid_disk < 0 ||
1540 test_bit(Faulty, &rdev2->flags))
1541 is_active = 0;
1542 if (is_active)
NeilBrown86e6ffd2005-11-08 21:39:24 -08001543 desc_nr = rdev2->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 else
NeilBrown86e6ffd2005-11-08 21:39:24 -08001545 desc_nr = next_spare++;
NeilBrown19133a42005-11-08 21:39:35 -08001546 rdev2->desc_nr = desc_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 d = &sb->disks[rdev2->desc_nr];
1548 nr_disks++;
1549 d->number = rdev2->desc_nr;
1550 d->major = MAJOR(rdev2->bdev->bd_dev);
1551 d->minor = MINOR(rdev2->bdev->bd_dev);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001552 if (is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 d->raid_disk = rdev2->raid_disk;
1554 else
1555 d->raid_disk = rdev2->desc_nr; /* compatibility */
NeilBrown1be78922006-03-27 01:18:03 -08001556 if (test_bit(Faulty, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 d->state = (1<<MD_DISK_FAULTY);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001558 else if (is_active) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 d->state = (1<<MD_DISK_ACTIVE);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001560 if (test_bit(In_sync, &rdev2->flags))
1561 d->state |= (1<<MD_DISK_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 active++;
1563 working++;
1564 } else {
1565 d->state = 0;
1566 spare++;
1567 working++;
1568 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001569 if (test_bit(WriteMostly, &rdev2->flags))
1570 d->state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11001571 if (test_bit(FailFast, &rdev2->flags))
1572 d->state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 /* now set the "removed" and "faulty" bits on any missing devices */
1575 for (i=0 ; i < mddev->raid_disks ; i++) {
1576 mdp_disk_t *d = &sb->disks[i];
1577 if (d->state == 0 && d->number == 0) {
1578 d->number = i;
1579 d->raid_disk = i;
1580 d->state = (1<<MD_DISK_REMOVED);
1581 d->state |= (1<<MD_DISK_FAULTY);
1582 failed++;
1583 }
1584 }
1585 sb->nr_disks = nr_disks;
1586 sb->active_disks = active;
1587 sb->working_disks = working;
1588 sb->failed_disks = failed;
1589 sb->spare_disks = spare;
1590
1591 sb->this_disk = sb->disks[rdev->desc_nr];
1592 sb->sb_csum = calc_sb_csum(sb);
1593}
1594
1595/*
Chris Webb0cd17fe2008-06-28 08:31:46 +10001596 * rdev_size_change for 0.90.0
1597 */
1598static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11001599super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001600{
Andre Noll58c0fed2009-03-31 14:33:13 +11001601 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001602 return 0; /* component must fit device */
NeilBrownc3d97142009-12-14 12:49:52 +11001603 if (rdev->mddev->bitmap_info.offset)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001604 return 0; /* can't move bitmap */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001605 rdev->sb_start = calc_dev_sboffset(rdev);
Andre Noll15f4a5f2008-07-21 14:42:12 +10001606 if (!num_sectors || num_sectors > rdev->sb_start)
1607 num_sectors = rdev->sb_start;
NeilBrown27a7b262011-09-10 17:21:28 +10001608 /* Limit to 4TB as metadata cannot record more than that.
1609 * 4TB == 2^32 KB, or 2*2^32 sectors.
1610 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001611 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001612 num_sectors = (sector_t)(2ULL << 32) - 2;
NeilBrown46533ff2016-11-18 16:16:11 +11001613 do {
1614 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001615 rdev->sb_page);
NeilBrown46533ff2016-11-18 16:16:11 +11001616 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11001617 return num_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10001618}
1619
NeilBrownc6563a82012-05-21 09:27:00 +10001620static int
1621super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1622{
1623 /* non-zero offset changes not possible with v0.90 */
1624 return new_offset == 0;
1625}
Chris Webb0cd17fe2008-06-28 08:31:46 +10001626
1627/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 * version 1 superblock
1629 */
1630
NeilBrownf72ffdd2014-09-30 14:23:59 +10001631static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632{
NeilBrown1c05b4b2006-10-21 10:24:08 -07001633 __le32 disk_csum;
1634 u32 csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 unsigned long long newcsum;
1636 int size = 256 + le32_to_cpu(sb->max_dev)*2;
NeilBrown1c05b4b2006-10-21 10:24:08 -07001637 __le32 *isuper = (__le32*)sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638
1639 disk_csum = sb->sb_csum;
1640 sb->sb_csum = 0;
1641 newcsum = 0;
NeilBrown1f3c9902012-12-11 13:09:00 +11001642 for (; size >= 4; size -= 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 newcsum += le32_to_cpu(*isuper++);
1644
1645 if (size == 2)
NeilBrown1c05b4b2006-10-21 10:24:08 -07001646 newcsum += le16_to_cpu(*(__le16*) isuper);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647
1648 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1649 sb->sb_csum = disk_csum;
1650 return cpu_to_le32(csum);
1651}
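/*
 * For reference, "size" above is in bytes: the 256-byte fixed part of
 * struct mdp_superblock_1 plus one __le16 role per device in dev_roles[]
 * (the same 256 shows up in the "(4096-256)/2" max_dev check in
 * super_1_load() below).  E.g. max_dev == 384 gives 256 + 768 = 1024
 * bytes, summed as little-endian 32-bit words; a trailing 2-byte tail,
 * when present, is added as a 16-bit value.  sb_csum itself is zeroed
 * first so the stored checksum never feeds into the sum.
 */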
1652
NeilBrown3cb03002011-10-11 16:45:26 +11001653static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654{
1655 struct mdp_superblock_1 *sb;
1656 int ret;
Andre Noll0f420352008-07-11 22:02:23 +10001657 sector_t sb_start;
NeilBrownc6563a82012-05-21 09:27:00 +10001658 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown0002b272005-09-09 16:23:53 -07001660 int bmask;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001661 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
1663 /*
Andre Noll0f420352008-07-11 22:02:23 +10001664 * Calculate the position of the superblock in 512-byte sectors.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 * It is always aligned to a 4K boundary and
1666 * depending on minor_version, it can be:
1667 * 0: At least 8K, but less than 12K, from end of device
1668 * 1: At start of device
1669 * 2: 4K from start of device.
1670 */
1671 switch(minor_version) {
1672 case 0:
Mike Snitzer77304d22010-11-08 14:39:12 +01001673 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll0f420352008-07-11 22:02:23 +10001674 sb_start -= 8*2;
1675 sb_start &= ~(sector_t)(4*2-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 break;
1677 case 1:
Andre Noll0f420352008-07-11 22:02:23 +10001678 sb_start = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679 break;
1680 case 2:
Andre Noll0f420352008-07-11 22:02:23 +10001681 sb_start = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 break;
1683 default:
1684 return -EINVAL;
1685 }
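	/*
	 * Worked example for a hypothetical 1953525168-sector device:
	 * minor_version 0 puts the superblock 8-12KiB from the end, aligned
	 * down to 4KiB, so sb_start = (1953525168 - 16) & ~7 = 1953525152;
	 * minor_version 1 uses sector 0 and minor_version 2 sector 8 (4KiB).
	 */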
Andre Noll0f420352008-07-11 22:02:23 +10001686 rdev->sb_start = sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687
NeilBrown0002b272005-09-09 16:23:53 -07001688 /* superblock is rarely larger than 1K, but it can be larger,
1689 * and it is safe to read 4k, so we do that
1690 */
1691 ret = read_disk_sb(rdev, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 if (ret) return ret;
1693
Namhyung Kim65a06f062011-07-27 11:00:36 +10001694 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1697 sb->major_version != cpu_to_le32(1) ||
1698 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
Andre Noll0f420352008-07-11 22:02:23 +10001699 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
NeilBrown71c08052005-09-09 16:23:51 -07001700 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 return -EINVAL;
1702
1703 if (calc_sb_1_csum(sb) != sb->sb_csum) {
NeilBrown9d487392016-11-02 14:16:49 +11001704 pr_warn("md: invalid superblock checksum on %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 bdevname(rdev->bdev,b));
1706 return -EINVAL;
1707 }
1708 if (le64_to_cpu(sb->data_size) < 10) {
NeilBrown9d487392016-11-02 14:16:49 +11001709 pr_warn("md: data_size too small on %s\n",
1710 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 return -EINVAL;
1712 }
NeilBrownc6563a82012-05-21 09:27:00 +10001713 if (sb->pad0 ||
1714 sb->pad3[0] ||
1715 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1716 /* Some padding is non-zero, might be a new feature */
1717 return -EINVAL;
NeilBrowne11e93f2007-05-09 02:35:36 -07001718
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 rdev->preferred_minor = 0xffff;
1720 rdev->data_offset = le64_to_cpu(sb->data_offset);
NeilBrownc6563a82012-05-21 09:27:00 +10001721 rdev->new_data_offset = rdev->data_offset;
1722 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1723 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1724 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
NeilBrown4dbcdc72006-01-06 00:20:52 -08001725 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
NeilBrown0002b272005-09-09 16:23:53 -07001727 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
Martin K. Petersene1defc42009-05-22 17:17:49 -04001728 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
NeilBrown0002b272005-09-09 16:23:53 -07001729 if (rdev->sb_size & bmask)
NeilBrowna1801f82008-03-04 14:29:31 -08001730 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1731
1732 if (minor_version
Andre Noll0f420352008-07-11 22:02:23 +10001733 && rdev->data_offset < sb_start + (rdev->sb_size/512))
NeilBrowna1801f82008-03-04 14:29:31 -08001734 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10001735 if (minor_version
1736 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1737 return -EINVAL;
NeilBrown0002b272005-09-09 16:23:53 -07001738
NeilBrown31b65a02006-07-10 04:44:14 -07001739 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1740 rdev->desc_nr = -1;
1741 else
1742 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1743
NeilBrown2699b672011-07-28 11:31:47 +10001744 if (!rdev->bb_page) {
1745 rdev->bb_page = alloc_page(GFP_KERNEL);
1746 if (!rdev->bb_page)
1747 return -ENOMEM;
1748 }
1749 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1750 rdev->badblocks.count == 0) {
1751 /* need to load the bad block list.
1752 * Currently we limit it to one page.
1753 */
1754 s32 offset;
1755 sector_t bb_sector;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001756 __le64 *bbp;
NeilBrown2699b672011-07-28 11:31:47 +10001757 int i;
1758 int sectors = le16_to_cpu(sb->bblog_size);
1759 if (sectors > (PAGE_SIZE / 512))
1760 return -EINVAL;
1761 offset = le32_to_cpu(sb->bblog_offset);
1762 if (offset == 0)
1763 return -EINVAL;
1764 bb_sector = (long long)offset;
1765 if (!sync_page_io(rdev, bb_sector, sectors << 9,
Mike Christie796a5cf2016-06-05 14:32:07 -05001766 rdev->bb_page, REQ_OP_READ, 0, true))
NeilBrown2699b672011-07-28 11:31:47 +10001767 return -EIO;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001768 bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10001769 rdev->badblocks.shift = sb->bblog_shift;
1770 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1771 u64 bb = le64_to_cpu(*bbp);
1772 int count = bb & (0x3ff);
1773 u64 sector = bb >> 10;
1774 sector <<= sb->bblog_shift;
1775 count <<= sb->bblog_shift;
1776 if (bb + 1 == 0)
1777 break;
Vishal Vermafc974ee2015-12-24 19:20:34 -07001778 if (badblocks_set(&rdev->badblocks, sector, count, 1))
NeilBrown2699b672011-07-28 11:31:47 +10001779 return -EINVAL;
1780 }
NeilBrown486adf72013-04-24 11:42:44 +10001781 } else if (sb->bblog_offset != 0)
1782 rdev->badblocks.shift = 0;
NeilBrown2699b672011-07-28 11:31:47 +10001783
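	/*
	 * The bad-block records decoded above are packed __le64 values: the
	 * low 10 bits carry the length and the remaining 54 bits the start
	 * sector, both in units of (1 << bblog_shift) sectors; an all-ones
	 * record marks unused space and ends the list.  E.g. with
	 * bblog_shift == 0, a record of 0x2803 describes 3 bad sectors
	 * starting at sector 10.
	 */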
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001784 if ((le32_to_cpu(sb->feature_map) &
1785 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001786 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1787 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1788 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1789 }
1790
NeilBrown33f2c352019-09-09 16:52:29 +10001791 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1792 sb->level != 0)
1793 return -EINVAL;
1794
Yufen Yu228fc7d2019-10-30 18:47:02 +08001795 /* not a spare disk, or LEVEL_MULTIPATH */
1796 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1797 (rdev->desc_nr >= 0 &&
1798 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1799 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1800 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1801 spare_disk = false;
Yufen Yu6a5cb532019-10-16 16:00:03 +08001802
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001803 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001804 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001805 ret = 1;
1806 else
1807 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001808 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001810 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
1812 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1813 sb->level != refsb->level ||
1814 sb->layout != refsb->layout ||
1815 sb->chunksize != refsb->chunksize) {
NeilBrown9d487392016-11-02 14:16:49 +11001816 pr_warn("md: %s has strangely different superblock to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 bdevname(rdev->bdev,b),
1818 bdevname(refdev->bdev,b2));
1819 return -EINVAL;
1820 }
1821 ev1 = le64_to_cpu(sb->events);
1822 ev2 = le64_to_cpu(refsb->events);
1823
Yufen Yu228fc7d2019-10-30 18:47:02 +08001824 if (!spare_disk && ev1 > ev2)
NeilBrown8ed75462006-02-03 03:03:41 -08001825 ret = 1;
1826 else
1827 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 }
NeilBrownc6563a82012-05-21 09:27:00 +10001829 if (minor_version) {
1830 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1831 sectors -= rdev->data_offset;
1832 } else
1833 sectors = rdev->sb_start;
1834 if (sectors < le64_to_cpu(sb->data_size))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 return -EINVAL;
Andre Nolldd8ac332009-03-31 14:33:13 +11001836 rdev->sectors = le64_to_cpu(sb->data_size);
NeilBrown8ed75462006-02-03 03:03:41 -08001837 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838}
1839
NeilBrownfd01b882011-10-11 16:47:53 +11001840static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841{
Namhyung Kim65a06f062011-07-27 11:00:36 +10001842 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001843 __u64 ev1 = le64_to_cpu(sb->events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
NeilBrown41158c72005-06-21 17:17:25 -07001845 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001846 clear_bit(Faulty, &rdev->flags);
1847 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001848 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001849 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001850
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 if (mddev->raid_disks == 0) {
1852 mddev->major_version = 1;
1853 mddev->patch_version = 0;
NeilBrowne6910632008-02-06 01:39:51 -08001854 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001855 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001856 mddev->ctime = le64_to_cpu(sb->ctime);
1857 mddev->utime = le64_to_cpu(sb->utime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858 mddev->level = le32_to_cpu(sb->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08001859 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 mddev->layout = le32_to_cpu(sb->layout);
1861 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001862 mddev->dev_sectors = le64_to_cpu(sb->size);
NeilBrown07d84d102006-06-26 00:27:56 -07001863 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001864 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001865 mddev->bitmap_info.space = 0;
1866 /* Default location for bitmap is 1K after superblock
1867 * using 3K - total of 4K
1868 */
NeilBrownc3d97142009-12-14 12:49:52 +11001869 mddev->bitmap_info.default_offset = 1024 >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001870 mddev->bitmap_info.default_space = (4096-1024) >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001871 mddev->reshape_backwards = 0;
1872
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1874 memcpy(mddev->uuid, sb->set_uuid, 16);
1875
1876 mddev->max_disks = (4096-256)/2;
NeilBrowna654b9d82005-06-21 17:17:27 -07001877
NeilBrown71c08052005-09-09 16:23:51 -07001878 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001879 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001880 mddev->bitmap_info.offset =
1881 (__s32)le32_to_cpu(sb->bitmap_offset);
NeilBrown6409bb02012-05-22 13:55:07 +10001882 /* Metadata doesn't record how much space is available.
1883 * For 1.0, we assume the bitmap can use space up to the
1884 * superblock if it sits before it, else up to 4K beyond the superblock.
1885 * For others, assume no change is possible.
1886 */
1887 if (mddev->minor_version > 0)
1888 mddev->bitmap_info.space = 0;
1889 else if (mddev->bitmap_info.offset > 0)
1890 mddev->bitmap_info.space =
1891 8 - mddev->bitmap_info.offset;
1892 else
1893 mddev->bitmap_info.space =
1894 -mddev->bitmap_info.offset;
1895 }
NeilBrowne11e93f2007-05-09 02:35:36 -07001896
NeilBrownf6705572006-03-27 01:18:11 -08001897 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1898 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1899 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1900 mddev->new_level = le32_to_cpu(sb->new_level);
1901 mddev->new_layout = le32_to_cpu(sb->new_layout);
Andre Noll664e7c42009-06-18 08:45:27 +10001902 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
NeilBrown2c810cd2012-05-21 09:27:00 +10001903 if (mddev->delta_disks < 0 ||
1904 (mddev->delta_disks == 0 &&
1905 (le32_to_cpu(sb->feature_map)
1906 & MD_FEATURE_RESHAPE_BACKWARDS)))
1907 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001908 } else {
1909 mddev->reshape_position = MaxSector;
1910 mddev->delta_disks = 0;
1911 mddev->new_level = mddev->level;
1912 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001913 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001914 }
1915
NeilBrown33f2c352019-09-09 16:52:29 +10001916 if (mddev->level == 0 &&
1917 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1918 mddev->layout = -1;
1919
Song Liu486b0f72016-08-19 15:34:01 -07001920 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
Shaohua Lia62ab492016-01-06 14:37:13 -08001921 set_bit(MD_HAS_JOURNAL, &mddev->flags);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001922
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001923 if (le32_to_cpu(sb->feature_map) &
1924 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001925 if (le32_to_cpu(sb->feature_map) &
1926 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1927 return -EINVAL;
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001928 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1929 (le32_to_cpu(sb->feature_map) &
1930 MD_FEATURE_MULTIPLE_PPLS))
1931 return -EINVAL;
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001932 set_bit(MD_HAS_PPL, &mddev->flags);
1933 }
NeilBrown41158c72005-06-21 17:17:25 -07001934 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001935 /* Insist on a good event counter while assembling, except for
1936 * spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001938 if (rdev->desc_nr >= 0 &&
1939 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
Song Liua3dfbda2015-10-08 21:54:11 -07001940 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1941 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
NeilBrownbe6800a2010-05-18 10:17:09 +10001942 if (ev1 < mddev->events)
1943 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001944 } else if (mddev->bitmap) {
1945 /* If adding to array with a bitmap, then we can accept an
1946 * older device, but not too old.
1947 */
NeilBrown41158c72005-06-21 17:17:25 -07001948 if (ev1 < mddev->bitmap->events_cleared)
1949 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001950 if (ev1 < mddev->events)
1951 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001952 } else {
1953 if (ev1 < mddev->events)
1954 /* just a hot-add of a new device, leave raid_disk at -1 */
1955 return 0;
1956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 if (mddev->level != LEVEL_MULTIPATH) {
1958 int role;
NeilBrown3673f302009-08-03 10:59:56 +10001959 if (rdev->desc_nr < 0 ||
1960 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
Song Liuc4d4c912015-08-13 14:31:54 -07001961 role = MD_DISK_ROLE_SPARE;
NeilBrown3673f302009-08-03 10:59:56 +10001962 rdev->desc_nr = -1;
1963 } else
1964 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 switch(role) {
Song Liuc4d4c912015-08-13 14:31:54 -07001966 case MD_DISK_ROLE_SPARE: /* spare */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 break;
Song Liuc4d4c912015-08-13 14:31:54 -07001968 case MD_DISK_ROLE_FAULTY: /* faulty */
NeilBrownb2d444d2005-11-08 21:39:31 -08001969 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 break;
Song Liubac624f2015-08-13 14:31:55 -07001971 case MD_DISK_ROLE_JOURNAL: /* journal device */
1972 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1973 /* journal device without journal feature */
NeilBrown9d487392016-11-02 14:16:49 +11001974 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
Song Liubac624f2015-08-13 14:31:55 -07001975 return -EINVAL;
1976 }
1977 set_bit(Journal, &rdev->flags);
Shaohua Li3069aa82015-08-13 14:31:56 -07001978 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
Shaohua Li9b156032015-12-18 15:19:16 +11001979 rdev->raid_disk = 0;
Song Liubac624f2015-08-13 14:31:55 -07001980 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 default:
NeilBrownf4667222013-12-09 12:04:56 +11001982 rdev->saved_raid_disk = role;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001983 if ((le32_to_cpu(sb->feature_map) &
NeilBrownf4667222013-12-09 12:04:56 +11001984 MD_FEATURE_RECOVERY_OFFSET)) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001985 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11001986 if (!(le32_to_cpu(sb->feature_map) &
1987 MD_FEATURE_RECOVERY_BITMAP))
1988 rdev->saved_raid_disk = -1;
Guoqing Jiang062f5b2a2019-07-24 11:09:20 +02001989 } else {
1990 /*
1991 * If the array is FROZEN, then the device can't
1992 * be in_sync with rest of array.
1993 */
1994 if (!test_bit(MD_RECOVERY_FROZEN,
1995 &mddev->recovery))
1996 set_bit(In_sync, &rdev->flags);
1997 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998 rdev->raid_disk = role;
1999 break;
2000 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07002001 if (sb->devflags & WriteMostly1)
2002 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11002003 if (sb->devflags & FailFast1)
2004 set_bit(FailFast, &rdev->flags);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002005 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
2006 set_bit(Replacement, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07002007 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08002008 set_bit(In_sync, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07002009
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 return 0;
2011}
2012
NeilBrownfd01b882011-10-11 16:47:53 +11002013static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014{
2015 struct mdp_superblock_1 *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11002016 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 int max_dev, i;
2018 /* make rdev->sb match mddev and rdev data. */
2019
Namhyung Kim65a06f062011-07-27 11:00:36 +10002020 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021
2022 sb->feature_map = 0;
2023 sb->pad0 = 0;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002024 sb->recovery_offset = cpu_to_le64(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 memset(sb->pad3, 0, sizeof(sb->pad3));
2026
2027 sb->utime = cpu_to_le64((__u64)mddev->utime);
2028 sb->events = cpu_to_le64(mddev->events);
2029 if (mddev->in_sync)
2030 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
Shaohua Libd18f642015-09-02 13:49:50 -07002031 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2032 sb->resync_offset = cpu_to_le64(MaxSector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 else
2034 sb->resync_offset = cpu_to_le64(0);
2035
NeilBrown1c05b4b2006-10-21 10:24:08 -07002036 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
NeilBrown4dbcdc72006-01-06 00:20:52 -08002037
NeilBrownf0ca3402006-02-02 14:28:04 -08002038 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11002039 sb->size = cpu_to_le64(mddev->dev_sectors);
Andre Noll9d8f0362009-06-18 08:45:01 +10002040 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
NeilBrown62e1e382009-05-26 09:40:59 +10002041 sb->level = cpu_to_le32(mddev->level);
2042 sb->layout = cpu_to_le32(mddev->layout);
NeilBrown688834e2016-11-18 16:16:11 +11002043 if (test_bit(FailFast, &rdev->flags))
2044 sb->devflags |= FailFast1;
2045 else
2046 sb->devflags &= ~FailFast1;
NeilBrownf0ca3402006-02-02 14:28:04 -08002047
NeilBrownaeb9b2112011-08-25 14:43:08 +10002048 if (test_bit(WriteMostly, &rdev->flags))
2049 sb->devflags |= WriteMostly1;
2050 else
2051 sb->devflags &= ~WriteMostly1;
NeilBrownc6563a82012-05-21 09:27:00 +10002052 sb->data_offset = cpu_to_le64(rdev->data_offset);
2053 sb->data_size = cpu_to_le64(rdev->sectors);
NeilBrownaeb9b2112011-08-25 14:43:08 +10002054
NeilBrownc3d97142009-12-14 12:49:52 +11002055 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2056 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
NeilBrown71c08052005-09-09 16:23:51 -07002057 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
NeilBrowna654b9d82005-06-21 17:17:27 -07002058 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002059
Shaohua Lif2076e72015-10-08 21:54:12 -07002060 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
NeilBrown97e4f422009-03-31 14:33:13 +11002061 !test_bit(In_sync, &rdev->flags)) {
NeilBrown93be75f2009-12-14 12:50:06 +11002062 sb->feature_map |=
2063 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2064 sb->recovery_offset =
2065 cpu_to_le64(rdev->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11002066 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2067 sb->feature_map |=
2068 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002069 }
Shaohua Li3069aa82015-08-13 14:31:56 -07002070 /* Note: recovery_offset and journal_tail share space */
2071 if (test_bit(Journal, &rdev->flags))
2072 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002073 if (test_bit(Replacement, &rdev->flags))
2074 sb->feature_map |=
2075 cpu_to_le32(MD_FEATURE_REPLACEMENT);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002076
NeilBrownf6705572006-03-27 01:18:11 -08002077 if (mddev->reshape_position != MaxSector) {
2078 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2079 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2080 sb->new_layout = cpu_to_le32(mddev->new_layout);
2081 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2082 sb->new_level = cpu_to_le32(mddev->new_level);
Andre Noll664e7c42009-06-18 08:45:27 +10002083 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
NeilBrown2c810cd2012-05-21 09:27:00 +10002084 if (mddev->delta_disks == 0 &&
2085 mddev->reshape_backwards)
2086 sb->feature_map
2087 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
NeilBrownc6563a82012-05-21 09:27:00 +10002088 if (rdev->new_data_offset != rdev->data_offset) {
2089 sb->feature_map
2090 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2091 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2092 - rdev->data_offset));
2093 }
NeilBrownf6705572006-03-27 01:18:11 -08002094 }
NeilBrowna654b9d82005-06-21 17:17:27 -07002095
Goldwyn Rodrigues3c462c82015-08-19 07:35:54 +10002096 if (mddev_is_clustered(mddev))
2097 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2098
NeilBrown2699b672011-07-28 11:31:47 +10002099 if (rdev->badblocks.count == 0)
2100 /* Nothing to do for bad blocks*/ ;
2101 else if (sb->bblog_offset == 0)
2102 /* Cannot record bad blocks on this device */
2103 md_error(mddev, rdev);
2104 else {
2105 struct badblocks *bb = &rdev->badblocks;
Christoph Hellwigae506402019-04-04 18:56:13 +02002106 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10002107 u64 *p = bb->page;
2108 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2109 if (bb->changed) {
2110 unsigned seq;
2111
2112retry:
2113 seq = read_seqbegin(&bb->lock);
2114
2115 memset(bbp, 0xff, PAGE_SIZE);
2116
2117 for (i = 0 ; i < bb->count ; i++) {
majianpeng35f9ac22012-11-08 08:56:27 +08002118 u64 internal_bb = p[i];
NeilBrown2699b672011-07-28 11:31:47 +10002119 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2120 | BB_LEN(internal_bb));
majianpeng35f9ac22012-11-08 08:56:27 +08002121 bbp[i] = cpu_to_le64(store_bb);
NeilBrown2699b672011-07-28 11:31:47 +10002122 }
NeilBrownd0962932012-03-19 12:46:41 +11002123 bb->changed = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002124 if (read_seqretry(&bb->lock, seq))
2125 goto retry;
2126
2127 bb->sector = (rdev->sb_start +
2128 (int)le32_to_cpu(sb->bblog_offset));
2129 bb->size = le16_to_cpu(sb->bblog_size);
NeilBrown2699b672011-07-28 11:31:47 +10002130 }
2131 }
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 max_dev = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11002134 rdev_for_each(rdev2, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 if (rdev2->desc_nr+1 > max_dev)
2136 max_dev = rdev2->desc_nr+1;
NeilBrowna778b732007-05-23 13:58:10 -07002137
NeilBrown70471da2009-08-03 10:59:57 +10002138 if (max_dev > le32_to_cpu(sb->max_dev)) {
2139 int bmask;
NeilBrowna778b732007-05-23 13:58:10 -07002140 sb->max_dev = cpu_to_le32(max_dev);
NeilBrown70471da2009-08-03 10:59:57 +10002141 rdev->sb_size = max_dev * 2 + 256;
2142 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2143 if (rdev->sb_size & bmask)
2144 rdev->sb_size = (rdev->sb_size | bmask) + 1;
NeilBrownddcf3522010-09-08 16:48:17 +10002145 } else
2146 max_dev = le32_to_cpu(sb->max_dev);
2147
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 for (i=0; i<max_dev;i++)
Lidong Zhong8df72022017-06-12 10:45:55 +08002149 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
NeilBrownf72ffdd2014-09-30 14:23:59 +10002150
Song Liua97b7892015-10-08 21:54:09 -07002151 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2152 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002154 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
Pawel Baldysiakddc08822017-08-16 17:13:45 +02002155 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2156 sb->feature_map |=
2157 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2158 else
2159 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002160 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2161 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2162 }
2163
NeilBrowndafb20f2012-03-19 12:46:39 +11002164 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 i = rdev2->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08002166 if (test_bit(Faulty, &rdev2->flags))
Song Liuc4d4c912015-08-13 14:31:54 -07002167 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08002168 else if (test_bit(In_sync, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Song Liua97b7892015-10-08 21:54:09 -07002170 else if (test_bit(Journal, &rdev2->flags))
Song Liubac624f2015-08-13 14:31:55 -07002171 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
NeilBrown93be75f2009-12-14 12:50:06 +11002172 else if (rdev2->raid_disk >= 0)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002173 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 else
Song Liuc4d4c912015-08-13 14:31:54 -07002175 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 }
2177
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 sb->sb_csum = calc_sb_1_csum(sb);
2179}
2180
Xiao Nid9c0fa52020-06-30 15:55:36 +08002181static sector_t super_1_choose_bm_space(sector_t dev_size)
2182{
2183 sector_t bm_space;
2184
2185 /* if the device is bigger than 8Gig, save 64k for bitmap
2186 * usage; if bigger than 200Gig, save 128k
2187 */
2188 if (dev_size < 64*2)
2189 bm_space = 0;
2190 else if (dev_size - 64*2 >= 200*1024*1024*2)
2191 bm_space = 128*2;
2192 else if (dev_size - 4*2 > 8*1024*1024*2)
2193 bm_space = 64*2;
2194 else
2195 bm_space = 4*2;
2196 return bm_space;
2197}
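/*
 * For illustration (dev_size in 512-byte sectors): a hypothetical 100GiB
 * component (209715200 sectors) takes the ">8Gig" branch and reserves
 * 64*2 = 128 sectors (64KiB); a 1TiB component (2147483648 sectors)
 * clears the 200Gig threshold and reserves 128*2 = 256 sectors (128KiB);
 * devices smaller than 64KiB reserve nothing.
 */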
2198
Chris Webb0cd17fe2008-06-28 08:31:46 +10002199static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11002200super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002201{
2202 struct mdp_superblock_1 *sb;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002203 sector_t max_sectors;
Andre Noll58c0fed2009-03-31 14:33:13 +11002204 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002205 return 0; /* component must fit device */
NeilBrownc6563a82012-05-21 09:27:00 +10002206 if (rdev->data_offset != rdev->new_data_offset)
2207 return 0; /* too confusing */
Andre Noll0f420352008-07-11 22:02:23 +10002208 if (rdev->sb_start < rdev->data_offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002209 /* minor versions 1 and 2; superblock before data */
Mike Snitzer77304d22010-11-08 14:39:12 +01002210 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002211 max_sectors -= rdev->data_offset;
2212 if (!num_sectors || num_sectors > max_sectors)
2213 num_sectors = max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11002214 } else if (rdev->mddev->bitmap_info.offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002215 /* minor version 0 with bitmap we can't move */
2216 return 0;
2217 } else {
2218 /* minor version 0; superblock after data */
Xiao Nid9c0fa52020-06-30 15:55:36 +08002219 sector_t sb_start, bm_space;
2220 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2221
2222 /* 8K is for superblock */
2223 sb_start = dev_size - 8*2;
Andre Noll0f420352008-07-11 22:02:23 +10002224 sb_start &= ~(sector_t)(4*2 - 1);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002225
2226 bm_space = super_1_choose_bm_space(dev_size);
2227
2228 /* Space that can be used to store data must exclude the
2229 * superblock, bitmap space and bad block space (4K)
2230 */
2231 max_sectors = sb_start - bm_space - 4*2;
2232
Andre Noll15f4a5f2008-07-21 14:42:12 +10002233 if (!num_sectors || num_sectors > max_sectors)
2234 num_sectors = max_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002235 }
Namhyung Kim65a06f062011-07-27 11:00:36 +10002236 sb = page_address(rdev->sb_page);
Andre Noll15f4a5f2008-07-21 14:42:12 +10002237 sb->data_size = cpu_to_le64(num_sectors);
Jason Yan3fb632e2017-03-10 11:27:23 +08002238 sb->super_offset = cpu_to_le64(rdev->sb_start);
Chris Webb0cd17fe2008-06-28 08:31:46 +10002239 sb->sb_csum = calc_sb_1_csum(sb);
NeilBrown46533ff2016-11-18 16:16:11 +11002240 do {
2241 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2242 rdev->sb_page);
2243 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11002244 return num_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10002245
2246}
2247
2248static int
2249super_1_allow_new_offset(struct md_rdev *rdev,
2250 unsigned long long new_offset)
2251{
2252 /* All necessary checks on new >= old have been done */
2253 struct bitmap *bitmap;
2254 if (new_offset >= rdev->data_offset)
2255 return 1;
2256
2257 /* with 1.0 metadata, there is no metadata to tread on
2258 * so we can always move back */
2259 if (rdev->mddev->minor_version == 0)
2260 return 1;
2261
2262 /* otherwise we must be sure not to step on
2263 * any metadata, so stay:
2264 * 36K beyond start of superblock
2265 * beyond end of badblocks
2266 * beyond write-intent bitmap
2267 */
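	/*
	 * (The check just below encodes the "36K" rule above: (32 + 4) * 2
	 * sectors is 72 * 512 bytes = 36KiB kept clear after sb_start.)
	 */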
2268 if (rdev->sb_start + (32+4)*2 > new_offset)
2269 return 0;
2270 bitmap = rdev->mddev->bitmap;
2271 if (bitmap && !rdev->mddev->bitmap_info.file &&
2272 rdev->sb_start + rdev->mddev->bitmap_info.offset +
NeilBrown1ec885c2012-05-22 13:55:10 +10002273 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
NeilBrownc6563a82012-05-21 09:27:00 +10002274 return 0;
2275 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2276 return 0;
2277
2278 return 1;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002279}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
Adrian Bunk75c96f82005-05-05 16:16:09 -07002281static struct super_type super_types[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 [0] = {
2283 .name = "0.90.0",
2284 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002285 .load_super = super_90_load,
2286 .validate_super = super_90_validate,
2287 .sync_super = super_90_sync,
2288 .rdev_size_change = super_90_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002289 .allow_new_offset = super_90_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 },
2291 [1] = {
2292 .name = "md-1",
2293 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002294 .load_super = super_1_load,
2295 .validate_super = super_1_validate,
2296 .sync_super = super_1_sync,
2297 .rdev_size_change = super_1_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002298 .allow_new_offset = super_1_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 },
2300};
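
/*
 * mddev->major_version indexes directly into super_types[], so the
 * metadata handlers are dispatched as, for example:
 *
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 *
 * as done in sync_super() below.
 */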
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
NeilBrownfd01b882011-10-11 16:47:53 +11002302static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
Jonathan Brassow076f9682011-06-07 17:51:30 -05002303{
2304 if (mddev->sync_super) {
2305 mddev->sync_super(mddev, rdev);
2306 return;
2307 }
2308
2309 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2310
2311 super_types[mddev->major_version].sync_super(mddev, rdev);
2312}
2313
NeilBrownfd01b882011-10-11 16:47:53 +11002314static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315{
NeilBrown3cb03002011-10-11 16:45:26 +11002316 struct md_rdev *rdev, *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
NeilBrown4b809912008-07-21 17:05:25 +10002318 rcu_read_lock();
Song Liu0b020e82015-09-03 23:00:35 -07002319 rdev_for_each_rcu(rdev, mddev1) {
2320 if (test_bit(Faulty, &rdev->flags) ||
2321 test_bit(Journal, &rdev->flags) ||
2322 rdev->raid_disk == -1)
2323 continue;
2324 rdev_for_each_rcu(rdev2, mddev2) {
2325 if (test_bit(Faulty, &rdev2->flags) ||
2326 test_bit(Journal, &rdev2->flags) ||
2327 rdev2->raid_disk == -1)
2328 continue;
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02002329 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
NeilBrown4b809912008-07-21 17:05:25 +10002330 rcu_read_unlock();
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002331 return 1;
NeilBrown4b809912008-07-21 17:05:25 +10002332 }
Song Liu0b020e82015-09-03 23:00:35 -07002333 }
2334 }
NeilBrown4b809912008-07-21 17:05:25 +10002335 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 return 0;
2337}
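
/*
 * match_mddev_units() returns 1 when the two arrays share an underlying
 * gendisk among their active, non-faulty, non-journal members, and 0
 * otherwise; it is a sanity check against two arrays ending up on the
 * same physical device.
 */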
2338
2339static LIST_HEAD(pending_raid_disks);
2340
Andre Nollac5e7112009-08-03 10:59:47 +10002341/*
2342 * Try to register data integrity profile for an mddev
2343 *
2344 * This is called when an array is started and after a disk has been kicked
2345 * from the array. It only succeeds if all working and active component devices
2346 * are integrity capable with matching profiles.
2347 */
NeilBrownfd01b882011-10-11 16:47:53 +11002348int md_integrity_register(struct mddev *mddev)
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002349{
NeilBrown3cb03002011-10-11 16:45:26 +11002350 struct md_rdev *rdev, *reference = NULL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002351
Andre Nollac5e7112009-08-03 10:59:47 +10002352 if (list_empty(&mddev->disks))
2353 return 0; /* nothing to do */
Jonathan Brassow629acb62011-06-08 15:10:08 +10002354 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2355 return 0; /* shouldn't register, or already is */
NeilBrowndafb20f2012-03-19 12:46:39 +11002356 rdev_for_each(rdev, mddev) {
Andre Nollac5e7112009-08-03 10:59:47 +10002357 /* skip spares and non-functional disks */
2358 if (test_bit(Faulty, &rdev->flags))
2359 continue;
2360 if (rdev->raid_disk < 0)
2361 continue;
Andre Nollac5e7112009-08-03 10:59:47 +10002362 if (!reference) {
2363 /* Use the first rdev as the reference */
2364 reference = rdev;
2365 continue;
2366 }
2367 /* does this rdev's profile match the reference profile? */
2368 if (blk_integrity_compare(reference->bdev->bd_disk,
2369 rdev->bdev->bd_disk) < 0)
2370 return -EINVAL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002371 }
Martin K. Petersen89078d52011-03-28 20:09:12 -04002372 if (!reference || !bdev_get_integrity(reference->bdev))
2373 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002374 /*
2375 * All component devices are integrity capable and have matching
2376 * profiles, register the common profile for the md device.
2377 */
Martin K. Petersen25520d52015-10-21 13:19:49 -04002378 blk_integrity_register(mddev->gendisk,
2379 bdev_get_integrity(reference->bdev));
2380
NeilBrown9d487392016-11-02 14:16:49 +11002381 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
Kent Overstreetafeee512018-05-20 18:25:52 -04002382 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
NeilBrown9d487392016-11-02 14:16:49 +11002383 pr_err("md: failed to create integrity pool for %s\n",
Martin K. Petersena91a2782011-03-17 11:11:05 +01002384 mdname(mddev));
2385 return -EINVAL;
2386 }
Andre Nollac5e7112009-08-03 10:59:47 +10002387 return 0;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002388}
Andre Nollac5e7112009-08-03 10:59:47 +10002389EXPORT_SYMBOL(md_integrity_register);
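
/*
 * Summary of the registration above: the first working, in-array rdev
 * is taken as the reference profile; every other member must compare
 * equal via blk_integrity_compare() or the registration fails with
 * -EINVAL.  On success the reference profile is registered on the md
 * gendisk and an integrity bio pool is created for the mddev.
 */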
2390
Dan Williams1501efa2016-01-13 16:00:07 -08002391/*
2392 * Attempt to add an rdev, but only if it is consistent with the current
2393 * integrity profile
2394 */
2395int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
Andre Nollac5e7112009-08-03 10:59:47 +10002396{
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002397 struct blk_integrity *bi_mddev;
Dan Williams1501efa2016-01-13 16:00:07 -08002398 char name[BDEVNAME_SIZE];
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002399
2400 if (!mddev->gendisk)
Dan Williams1501efa2016-01-13 16:00:07 -08002401 return 0;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002402
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002403 bi_mddev = blk_get_integrity(mddev->gendisk);
Andre Nollac5e7112009-08-03 10:59:47 +10002404
2405 if (!bi_mddev) /* nothing to do */
Dan Williams1501efa2016-01-13 16:00:07 -08002406 return 0;
2407
2408 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11002409 pr_err("%s: incompatible integrity profile for %s\n",
2410 mdname(mddev), bdevname(rdev->bdev, name));
Dan Williams1501efa2016-01-13 16:00:07 -08002411 return -ENXIO;
2412 }
2413
2414 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002415}
2416EXPORT_SYMBOL(md_integrity_add_rdev);
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002417
NeilBrownf72ffdd2014-09-30 14:23:59 +10002418static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419{
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002420 char b[BDEVNAME_SIZE];
NeilBrown5e55e2f2007-03-26 21:32:14 -08002421 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422
Dan Williams11e2ede2008-04-30 00:52:32 -07002423 /* prevent duplicates */
2424 if (find_rdev(mddev, rdev->bdev->bd_dev))
2425 return -EEXIST;
2426
NeilBrown97b20ef2017-04-13 08:53:48 +10002427 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2428 mddev->pers)
2429 return -EROFS;
2430
Andre Nolldd8ac332009-03-31 14:33:13 +11002431 /* make sure rdev->sectors exceeds mddev->dev_sectors */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002432 if (!test_bit(Journal, &rdev->flags) &&
2433 rdev->sectors &&
2434 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
NeilBrowna778b732007-05-23 13:58:10 -07002435 if (mddev->pers) {
2436 /* Cannot change size, so fail
2437 * If mddev->level <= 0, then we don't care
2438 * about aligning sizes (e.g. linear)
2439 */
2440 if (mddev->level > 0)
2441 return -ENOSPC;
2442 } else
Andre Nolldd8ac332009-03-31 14:33:13 +11002443 mddev->dev_sectors = rdev->sectors;
NeilBrown2bf071b2006-01-06 00:20:55 -08002444 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
2446	/* Verify rdev->desc_nr is unique.
2447	 * If it is -1, assign a free number, else
2448	 * check that the number is not already in use
2449	 */
NeilBrown4878e9e2014-09-25 17:00:11 +10002450 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 if (rdev->desc_nr < 0) {
2452 int choice = 0;
NeilBrown4878e9e2014-09-25 17:00:11 +10002453 if (mddev->pers)
2454 choice = mddev->raid_disks;
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002455 while (md_find_rdev_nr_rcu(mddev, choice))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 choice++;
2457 rdev->desc_nr = choice;
2458 } else {
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002459 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
NeilBrown4878e9e2014-09-25 17:00:11 +10002460 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 return -EBUSY;
NeilBrown4878e9e2014-09-25 17:00:11 +10002462 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 }
NeilBrown4878e9e2014-09-25 17:00:11 +10002464 rcu_read_unlock();
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002465 if (!test_bit(Journal, &rdev->flags) &&
2466 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11002467 pr_warn("md: %s: array is limited to %d devices\n",
2468 mdname(mddev), mddev->max_disks);
NeilBrownde01dfa2009-02-06 18:02:46 +11002469 return -EBUSY;
2470 }
NeilBrown19133a42005-11-08 21:39:35 -08002471 bdevname(rdev->bdev,b);
Rasmus Villemoes90a9bef2015-06-25 15:02:36 -07002472 strreplace(b, '/', '!');
Greg Kroah-Hartman649316b2007-12-17 23:05:35 -07002473
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 rdev->mddev = mddev;
NeilBrown9d487392016-11-02 14:16:49 +11002475 pr_debug("md: bind<%s>\n", b);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002476
Guoqing Jiang963c5552019-06-14 17:10:36 +08002477 if (mddev->raid_disks)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002478 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang963c5552019-06-14 17:10:36 +08002479
Greg Kroah-Hartmanb2d6db52007-12-17 23:05:35 -07002480 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
NeilBrown5e55e2f2007-03-26 21:32:14 -08002481 goto fail;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002482
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09002483 /* failure here is OK */
Christoph Hellwig8d652692020-11-17 08:18:55 +01002484 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
NeilBrown00bcb4a2010-06-01 19:37:23 +10002485 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07002486 rdev->sysfs_unack_badblocks =
2487 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2488 rdev->sysfs_badblocks =
2489 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
NeilBrown3c0ee632008-10-21 13:25:28 +11002490
NeilBrown4b809912008-07-21 17:05:25 +10002491 list_add_rcu(&rdev->same_set, &mddev->disks);
Tejun Heoe09b4572010-11-13 11:55:17 +01002492 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
NeilBrown4044ba52009-01-09 08:31:11 +11002493
2494 /* May as well allow recovery to be retried once */
NeilBrown53890422011-07-27 11:00:36 +10002495 mddev->recovery_disabled++;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002496
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 return 0;
NeilBrown5e55e2f2007-03-26 21:32:14 -08002498
2499 fail:
NeilBrown9d487392016-11-02 14:16:49 +11002500 pr_warn("md: failed to register dev-%s for %s\n",
2501 b, mdname(mddev));
NeilBrown5e55e2f2007-03-26 21:32:14 -08002502 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503}
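
/*
 * After a successful bind the rdev is visible in two places: on
 * mddev->disks (added with list_add_rcu(), so readers may traverse the
 * list under RCU) and in sysfs as the "dev-<name>" kobject, with its
 * "block" symlink and the "state", "unacknowledged_bad_blocks" and
 * "bad_blocks" dirents cached for later notification.
 */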
2504
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002505static void rdev_delayed_delete(struct work_struct *ws)
NeilBrown5792a282007-04-04 19:08:18 -07002506{
NeilBrown3cb03002011-10-11 16:45:26 +11002507 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
NeilBrown5792a282007-04-04 19:08:18 -07002508 kobject_del(&rdev->kobj);
NeilBrown177a99b2008-02-06 01:39:56 -08002509 kobject_put(&rdev->kobj);
NeilBrown5792a282007-04-04 19:08:18 -07002510}
2511
NeilBrownf72ffdd2014-09-30 14:23:59 +10002512static void unbind_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513{
2514 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002515
Tejun Heo49731ba2011-01-14 18:43:57 +01002516 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
NeilBrown4b809912008-07-21 17:05:25 +10002517 list_del_rcu(&rdev->same_set);
NeilBrown9d487392016-11-02 14:16:49 +11002518 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01002519 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 rdev->mddev = NULL;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002521 sysfs_remove_link(&rdev->kobj, "block");
NeilBrown3c0ee632008-10-21 13:25:28 +11002522 sysfs_put(rdev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07002523 sysfs_put(rdev->sysfs_unack_badblocks);
2524 sysfs_put(rdev->sysfs_badblocks);
NeilBrown3c0ee632008-10-21 13:25:28 +11002525 rdev->sysfs_state = NULL;
Junxiao Bie1a86db2020-07-14 16:10:26 -07002526 rdev->sysfs_unack_badblocks = NULL;
2527 rdev->sysfs_badblocks = NULL;
NeilBrown2230dfe2011-07-28 11:31:46 +10002528 rdev->badblocks.count = 0;
NeilBrown5792a282007-04-04 19:08:18 -07002529 /* We need to delay this, otherwise we can deadlock when
NeilBrown4b809912008-07-21 17:05:25 +10002530	 * writing 'remove' to "dev/state". We also need
2531 * to delay it due to rcu usage.
NeilBrown5792a282007-04-04 19:08:18 -07002532 */
NeilBrown4b809912008-07-21 17:05:25 +10002533 synchronize_rcu();
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002534 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
NeilBrown177a99b2008-02-06 01:39:56 -08002535 kobject_get(&rdev->kobj);
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002536 queue_work(md_rdev_misc_wq, &rdev->del_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537}
2538
2539/*
2540 * prevent the device from being mounted, repartitioned or
2541 * otherwise reused by a RAID array (or any other kernel
2542 * subsystem), by bd_claiming the device.
2543 */
NeilBrown3cb03002011-10-11 16:45:26 +11002544static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545{
2546 int err = 0;
2547 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548
Tejun Heod4d77622010-11-13 11:55:18 +01002549 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
NeilBrown3cb03002011-10-11 16:45:26 +11002550 shared ? (struct md_rdev *)lock_rdev : rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 if (IS_ERR(bdev)) {
Christoph Hellwigea3edd42020-03-24 08:25:11 +01002552 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2553 MAJOR(dev), MINOR(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 return PTR_ERR(bdev);
2555 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 rdev->bdev = bdev;
2557 return err;
2558}
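
/*
 * Note on the holder argument above: the device is claimed exclusively
 * (FMODE_EXCL) with the rdev itself as holder, or with the address of
 * lock_rdev() as a common holder when 'shared' is set; exclusive claims
 * that pass the same holder pointer are allowed to coexist.
 */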
2559
NeilBrown3cb03002011-10-11 16:45:26 +11002560static void unlock_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561{
2562 struct block_device *bdev = rdev->bdev;
2563 rdev->bdev = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01002564 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565}
2566
2567void md_autodetect_dev(dev_t dev);
2568
NeilBrownf72ffdd2014-09-30 14:23:59 +10002569static void export_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570{
2571 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002572
NeilBrown9d487392016-11-02 14:16:49 +11002573 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
NeilBrown545c8792012-05-22 13:54:30 +10002574 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575#ifndef MODULE
NeilBrownd0fae182008-03-04 14:29:31 -08002576 if (test_bit(AutoDetected, &rdev->flags))
2577 md_autodetect_dev(rdev->bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578#endif
2579 unlock_rdev(rdev);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002580 kobject_put(&rdev->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581}
2582
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002583void md_kick_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584{
2585 unbind_rdev_from_array(rdev);
2586 export_rdev(rdev);
2587}
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002588EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589
NeilBrownfd01b882011-10-11 16:47:53 +11002590static void export_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591{
NeilBrown0638bb02014-09-25 17:43:47 +10002592 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593
NeilBrown0638bb02014-09-25 17:43:47 +10002594 while (!list_empty(&mddev->disks)) {
2595 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2596 same_set);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002597 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 mddev->raid_disks = 0;
2600 mddev->major_version = 0;
2601}
2602
NeilBrown6497709b2017-03-15 14:05:14 +11002603static bool set_in_sync(struct mddev *mddev)
2604{
Shaohua Liefa4b772017-10-18 22:08:13 -07002605 lockdep_assert_held(&mddev->lock);
NeilBrown4ad23a972017-03-15 14:05:14 +11002606 if (!mddev->in_sync) {
2607 mddev->sync_checkers++;
2608 spin_unlock(&mddev->lock);
2609 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2610 spin_lock(&mddev->lock);
2611 if (!mddev->in_sync &&
2612 percpu_ref_is_zero(&mddev->writes_pending)) {
NeilBrown6497709b2017-03-15 14:05:14 +11002613 mddev->in_sync = 1;
NeilBrown4ad23a972017-03-15 14:05:14 +11002614 /*
2615 * Ensure ->in_sync is visible before we clear
2616 * ->sync_checkers.
2617 */
NeilBrown55cc39f2017-03-15 14:05:14 +11002618 smp_mb();
NeilBrown6497709b2017-03-15 14:05:14 +11002619 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2620 sysfs_notify_dirent_safe(mddev->sysfs_state);
2621 }
NeilBrown4ad23a972017-03-15 14:05:14 +11002622 if (--mddev->sync_checkers == 0)
2623 percpu_ref_switch_to_percpu(&mddev->writes_pending);
NeilBrown6497709b2017-03-15 14:05:14 +11002624 }
2625 if (mddev->safemode == 1)
2626 mddev->safemode = 0;
2627 return mddev->in_sync;
2628}
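
/*
 * set_in_sync() relies on mddev->writes_pending being a percpu refcount:
 * it is switched to atomic mode (dropping mddev->lock around the
 * synchronous switch) so that percpu_ref_is_zero() gives an exact
 * answer, and only the last checker (sync_checkers falling to 0)
 * switches it back to percpu mode.
 */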
2629
NeilBrownf72ffdd2014-09-30 14:23:59 +10002630static void sync_sbs(struct mddev *mddev, int nospares)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631{
NeilBrown42543762006-06-26 00:27:57 -07002632 /* Update each superblock (in-memory image), but
2633 * if we are allowed to, skip spares which already
2634 * have the right event counter, or have one earlier
2635 * (which would mean they aren't being marked as dirty
2636 * with the rest of the array)
2637 */
NeilBrown3cb03002011-10-11 16:45:26 +11002638 struct md_rdev *rdev;
NeilBrowndafb20f2012-03-19 12:46:39 +11002639 rdev_for_each(rdev, mddev) {
NeilBrown42543762006-06-26 00:27:57 -07002640 if (rdev->sb_events == mddev->events ||
2641 (nospares &&
2642 rdev->raid_disk < 0 &&
NeilBrown42543762006-06-26 00:27:57 -07002643 rdev->sb_events+1 == mddev->events)) {
2644 /* Don't update this superblock */
2645 rdev->sb_loaded = 2;
2646 } else {
Jonathan Brassow076f9682011-06-07 17:51:30 -05002647 sync_super(mddev, rdev);
NeilBrown42543762006-06-26 00:27:57 -07002648 rdev->sb_loaded = 1;
2649 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 }
2651}
2652
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002653static bool does_sb_need_changing(struct mddev *mddev)
2654{
2655 struct md_rdev *rdev;
2656 struct mdp_superblock_1 *sb;
2657 int role;
2658
2659 /* Find a good rdev */
2660 rdev_for_each(rdev, mddev)
2661 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2662 break;
2663
2664 /* No good device found. */
2665 if (!rdev)
2666 return false;
2667
2668 sb = page_address(rdev->sb_page);
2669	/* Check if a device has become faulty or a spare has become active */
2670 rdev_for_each(rdev, mddev) {
2671 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2672 /* Device activated? */
2673 if (role == 0xffff && rdev->raid_disk >=0 &&
2674 !test_bit(Faulty, &rdev->flags))
2675 return true;
2676 /* Device turned faulty? */
2677 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2678 return true;
2679 }
2680
2681 /* Check if any mddev parameters have changed */
2682 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2683 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
Jason Yan13459212017-03-10 11:49:12 +08002684 (mddev->layout != le32_to_cpu(sb->layout)) ||
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002685 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2686 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2687 return true;
2688
2689 return false;
2690}
2691
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002692void md_update_sb(struct mddev *mddev, int force_change)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693{
NeilBrown3cb03002011-10-11 16:45:26 +11002694 struct md_rdev *rdev;
NeilBrown06d91a52005-06-21 17:17:12 -07002695 int sync_req;
NeilBrown42543762006-06-26 00:27:57 -07002696 int nospares = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002697 int any_badblocks_changed = 0;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002698 int ret = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
NeilBrownd87f0642013-04-24 11:42:40 +10002700 if (mddev->ro) {
2701 if (force_change)
Shaohua Li29530792016-12-08 15:48:19 -08002702 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownd87f0642013-04-24 11:42:40 +10002703 return;
2704 }
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002705
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002706repeat:
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002707 if (mddev_is_clustered(mddev)) {
Shaohua Li29530792016-12-08 15:48:19 -08002708 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002709 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002710 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04002711 nospares = 1;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002712 ret = md_cluster_ops->metadata_update_start(mddev);
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002713		/* Has someone else updated the sb */
2714 if (!does_sb_need_changing(mddev)) {
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002715 if (ret == 0)
2716 md_cluster_ops->metadata_update_cancel(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08002717 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2718 BIT(MD_SB_CHANGE_DEVS) |
2719 BIT(MD_SB_CHANGE_CLEAN));
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002720 return;
2721 }
2722 }
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002723
NeilBrowndb0505d2017-10-17 16:18:36 +11002724 /*
2725	 * First make sure individual recovery_offsets are correct.
2726	 * curr_resync_completed can only be used during recovery.
2727	 * During reshape/resync it might use array addresses rather
2728	 * than device addresses.
2729 */
NeilBrowndafb20f2012-03-19 12:46:39 +11002730 rdev_for_each(rdev, mddev) {
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002731 if (rdev->raid_disk >= 0 &&
2732 mddev->delta_disks >= 0 &&
NeilBrowndb0505d2017-10-17 16:18:36 +11002733 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2734 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2735 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002736 !test_bit(Journal, &rdev->flags) &&
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002737 !test_bit(In_sync, &rdev->flags) &&
2738 mddev->curr_resync_completed > rdev->recovery_offset)
2739 rdev->recovery_offset = mddev->curr_resync_completed;
2740
NeilBrownf72ffdd2014-09-30 14:23:59 +10002741 }
Dan Williamsbd52b742010-08-30 17:33:33 +10002742 if (!mddev->persistent) {
Shaohua Li29530792016-12-08 15:48:19 -08002743 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2744 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownde393cd2011-07-28 11:31:48 +10002745 if (!mddev->external) {
Shaohua Li29530792016-12-08 15:48:19 -08002746 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrowndafb20f2012-03-19 12:46:39 +11002747 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002748 if (rdev->badblocks.changed) {
NeilBrownd0962932012-03-19 12:46:41 +11002749 rdev->badblocks.changed = 0;
Vishal Vermafc974ee2015-12-24 19:20:34 -07002750 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002751 md_error(mddev, rdev);
2752 }
2753 clear_bit(Blocked, &rdev->flags);
2754 clear_bit(BlockedBadBlocks, &rdev->flags);
2755 wake_up(&rdev->blocked_wait);
2756 }
2757 }
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002758 wake_up(&mddev->sb_wait);
2759 return;
2760 }
2761
NeilBrown85572d72014-12-15 12:56:56 +11002762 spin_lock(&mddev->lock);
NeilBrown84692192006-08-27 01:23:49 -07002763
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11002764 mddev->utime = ktime_get_real_seconds();
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002765
Shaohua Li29530792016-12-08 15:48:19 -08002766 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002767 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002768 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002769		/* just a clean <-> dirty transition, possibly leave spares alone,
2770		 * though if the event count isn't the right even/odd value, we will
2771		 * have to do spares after all
2772 */
2773 nospares = 1;
2774 if (force_change)
2775 nospares = 0;
2776 if (mddev->degraded)
NeilBrown84692192006-08-27 01:23:49 -07002777 /* If the array is degraded, then skipping spares is both
2778 * dangerous and fairly pointless.
2779 * Dangerous because a device that was removed from the array
2780		 * might have an event_count that still looks up-to-date,
2781 * so it can be re-added without a resync.
2782 * Pointless because if there are any spares to skip,
2783 * then a recovery will happen and soon that array won't
2784 * be degraded any more and the spare can go back to sleep then.
2785 */
NeilBrown850b2b422006-10-03 01:15:46 -07002786 nospares = 0;
NeilBrown84692192006-08-27 01:23:49 -07002787
NeilBrown06d91a52005-06-21 17:17:12 -07002788 sync_req = mddev->in_sync;
NeilBrown42543762006-06-26 00:27:57 -07002789
2790 /* If this is just a dirty<->clean transition, and the array is clean
2791 * and 'events' is odd, we can roll back to the previous clean state */
NeilBrown850b2b422006-10-03 01:15:46 -07002792 if (nospares
NeilBrown42543762006-06-26 00:27:57 -07002793 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
NeilBrowna8707c02010-05-18 09:28:43 +10002794 && mddev->can_decrease_events
2795 && mddev->events != 1) {
NeilBrown42543762006-06-26 00:27:57 -07002796 mddev->events--;
NeilBrowna8707c02010-05-18 09:28:43 +10002797 mddev->can_decrease_events = 0;
2798 } else {
NeilBrown42543762006-06-26 00:27:57 -07002799 /* otherwise we have to go forward and ... */
2800 mddev->events ++;
NeilBrowna8707c02010-05-18 09:28:43 +10002801 mddev->can_decrease_events = nospares;
NeilBrown42543762006-06-26 00:27:57 -07002802 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803
NeilBrown403df472014-09-30 15:52:29 +10002804 /*
2805 * This 64-bit counter should never wrap.
2806 * Either we are in around ~1 trillion A.C., assuming
2807 * 1 reboot per second, or we have a bug...
2808 */
2809 WARN_ON(mddev->events == 0);
NeilBrown2699b672011-07-28 11:31:47 +10002810
NeilBrowndafb20f2012-03-19 12:46:39 +11002811 rdev_for_each(rdev, mddev) {
NeilBrown2699b672011-07-28 11:31:47 +10002812 if (rdev->badblocks.changed)
2813 any_badblocks_changed++;
NeilBrownde393cd2011-07-28 11:31:48 +10002814 if (test_bit(Faulty, &rdev->flags))
2815 set_bit(FaultRecorded, &rdev->flags);
2816 }
NeilBrown2699b672011-07-28 11:31:47 +10002817
NeilBrowne6910632008-02-06 01:39:51 -08002818 sync_sbs(mddev, nospares);
NeilBrown85572d72014-12-15 12:56:56 +11002819 spin_unlock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820
NeilBrown36a4e1f2011-10-07 14:23:17 +11002821 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2822 mdname(mddev), mddev->in_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823
Shaohua Li504634f2016-11-18 09:44:08 -08002824 if (mddev->queue)
2825 blk_add_trace_msg(mddev->queue, "md md_update_sb");
NeilBrown46533ff2016-11-18 16:16:11 +11002826rewrite:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002827 md_bitmap_update_sb(mddev->bitmap);
NeilBrowndafb20f2012-03-19 12:46:39 +11002828 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 char b[BDEVNAME_SIZE];
NeilBrown36a4e1f2011-10-07 14:23:17 +11002830
NeilBrown42543762006-06-26 00:27:57 -07002831 if (rdev->sb_loaded != 1)
2832 continue; /* no noise on spare devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833
NeilBrownf4667222013-12-09 12:04:56 +11002834 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown7bfa19f2005-06-21 17:17:28 -07002835 md_super_write(mddev,rdev,
Andre Noll0f420352008-07-11 22:02:23 +10002836 rdev->sb_start, rdev->sb_size,
NeilBrown7bfa19f2005-06-21 17:17:28 -07002837 rdev->sb_page);
NeilBrown36a4e1f2011-10-07 14:23:17 +11002838 pr_debug("md: (write) %s's sb offset: %llu\n",
2839 bdevname(rdev->bdev, b),
2840 (unsigned long long)rdev->sb_start);
NeilBrown42543762006-06-26 00:27:57 -07002841 rdev->sb_events = mddev->events;
NeilBrown2699b672011-07-28 11:31:47 +10002842 if (rdev->badblocks.size) {
2843 md_super_write(mddev, rdev,
2844 rdev->badblocks.sector,
2845 rdev->badblocks.size << 9,
2846 rdev->bb_page);
2847 rdev->badblocks.size = 0;
2848 }
NeilBrown7bfa19f2005-06-21 17:17:28 -07002849
NeilBrownf4667222013-12-09 12:04:56 +11002850 } else
NeilBrown36a4e1f2011-10-07 14:23:17 +11002851 pr_debug("md: %s (skipping faulty)\n",
2852 bdevname(rdev->bdev, b));
Andrei Warkentind70ed2e2011-10-18 12:16:48 +11002853
NeilBrown7bfa19f2005-06-21 17:17:28 -07002854 if (mddev->level == LEVEL_MULTIPATH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855 /* only need to write one superblock... */
2856 break;
2857 }
NeilBrown46533ff2016-11-18 16:16:11 +11002858 if (md_super_wait(mddev) < 0)
2859 goto rewrite;
Shaohua Li29530792016-12-08 15:48:19 -08002860 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
NeilBrown7bfa19f2005-06-21 17:17:28 -07002861
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002862 if (mddev_is_clustered(mddev) && ret == 0)
2863 md_cluster_ops->metadata_update_finish(mddev);
2864
NeilBrown850b2b422006-10-03 01:15:46 -07002865 if (mddev->in_sync != sync_req ||
Shaohua Li29530792016-12-08 15:48:19 -08002866 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2867 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
NeilBrown06d91a52005-06-21 17:17:12 -07002868 /* have to write it out again */
NeilBrown06d91a52005-06-21 17:17:12 -07002869 goto repeat;
NeilBrown3d310eb2005-06-21 17:17:26 -07002870 wake_up(&mddev->sb_wait);
NeilBrownacb180b2009-04-14 16:28:34 +10002871 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
Junxiao Bie1a86db2020-07-14 16:10:26 -07002872 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown06d91a52005-06-21 17:17:12 -07002873
NeilBrowndafb20f2012-03-19 12:46:39 +11002874 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002875 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2876 clear_bit(Blocked, &rdev->flags);
2877
2878 if (any_badblocks_changed)
Vishal Vermafc974ee2015-12-24 19:20:34 -07002879 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002880 clear_bit(BlockedBadBlocks, &rdev->flags);
2881 wake_up(&rdev->blocked_wait);
2882 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883}
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002884EXPORT_SYMBOL(md_update_sb);
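
/*
 * md_update_sb() flow, in brief: take a consistent snapshot of the event
 * count and change flags under mddev->lock, call sync_sbs() to refresh
 * the in-memory superblocks, then write each loaded superblock (and any
 * changed bad block log) with md_super_write().  If md_super_wait()
 * reports a write failure, the "rewrite:" label re-issues the writes
 * (MD_SB_CHANGE_DEVS was set by the failing write), and the "repeat:"
 * label restarts the whole update when the array went back out of sync
 * or new change flags appeared while writing, as detected by
 * bit_clear_unless().
 */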
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002886static int add_bound_rdev(struct md_rdev *rdev)
2887{
2888 struct mddev *mddev = rdev->mddev;
2889 int err = 0;
Shaohua Li87d4d912016-01-06 14:37:14 -08002890 bool add_journal = test_bit(Journal, &rdev->flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002891
Shaohua Li87d4d912016-01-06 14:37:14 -08002892 if (!mddev->pers->hot_remove_disk || add_journal) {
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002893		/* If there is hot_add_disk but no hot_remove_disk
2894		 * then newly added disks are for geometry changes,
2895		 * and should be added immediately.
2896 */
2897 super_types[mddev->major_version].
2898 validate_super(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002899 if (add_journal)
2900 mddev_suspend(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002901 err = mddev->pers->hot_add_disk(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002902 if (add_journal)
2903 mddev_resume(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002904 if (err) {
Guoqing Jiangdb767672016-06-02 23:32:05 -04002905 md_kick_rdev_from_array(rdev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002906 return err;
2907 }
2908 }
2909 sysfs_notify_dirent_safe(rdev->sysfs_state);
2910
Shaohua Li29530792016-12-08 15:48:19 -08002911 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002912 if (mddev->degraded)
2913 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2914 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2915 md_new_event(mddev);
2916 md_wakeup_thread(mddev->thread);
2917 return 0;
2918}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
Andre Noll7f6ce762008-03-23 18:34:54 +01002920/* words written to sysfs files may, or may not, be \n terminated.
NeilBrownbce74da2006-01-06 00:20:41 -08002921 * We want to accept either case. For this we use cmd_match.
2922 */
2923static int cmd_match(const char *cmd, const char *str)
2924{
2925 /* See if cmd, written into a sysfs file, matches
2926 * str. They must either be the same, or cmd can
2927 * have a trailing newline
2928 */
2929 while (*cmd && *str && *cmd == *str) {
2930 cmd++;
2931 str++;
2932 }
2933 if (*cmd == '\n')
2934 cmd++;
2935 if (*str || *cmd)
2936 return 0;
2937 return 1;
2938}
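
/*
 * For example, cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while
 * cmd_match("faulty extra", "faulty") returns 0.
 */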
2939
NeilBrown86e6ffd2005-11-08 21:39:24 -08002940struct rdev_sysfs_entry {
2941 struct attribute attr;
NeilBrown3cb03002011-10-11 16:45:26 +11002942 ssize_t (*show)(struct md_rdev *, char *);
2943 ssize_t (*store)(struct md_rdev *, const char *, size_t);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002944};
2945
2946static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002947state_show(struct md_rdev *rdev, char *page)
NeilBrown86e6ffd2005-11-08 21:39:24 -08002948{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002949 char *sep = ",";
NeilBrown20a49ff2008-02-06 01:39:57 -08002950 size_t len = 0;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002951 unsigned long flags = READ_ONCE(rdev->flags);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002952
NeilBrown758bfc82014-12-15 12:56:59 +11002953 if (test_bit(Faulty, &flags) ||
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02002954 (!test_bit(ExternalBbl, &flags) &&
2955 rdev->badblocks.unacked_exist))
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002956 len += sprintf(page+len, "faulty%s", sep);
2957 if (test_bit(In_sync, &flags))
2958 len += sprintf(page+len, "in_sync%s", sep);
2959 if (test_bit(Journal, &flags))
2960 len += sprintf(page+len, "journal%s", sep);
2961 if (test_bit(WriteMostly, &flags))
2962 len += sprintf(page+len, "write_mostly%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002963 if (test_bit(Blocked, &flags) ||
NeilBrown52c64152011-12-08 16:22:48 +11002964 (rdev->badblocks.unacked_exist
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002965 && !test_bit(Faulty, &flags)))
2966 len += sprintf(page+len, "blocked%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002967 if (!test_bit(Faulty, &flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002968 !test_bit(Journal, &flags) &&
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002969 !test_bit(In_sync, &flags))
2970 len += sprintf(page+len, "spare%s", sep);
2971 if (test_bit(WriteErrorSeen, &flags))
2972 len += sprintf(page+len, "write_error%s", sep);
2973 if (test_bit(WantReplacement, &flags))
2974 len += sprintf(page+len, "want_replacement%s", sep);
2975 if (test_bit(Replacement, &flags))
2976 len += sprintf(page+len, "replacement%s", sep);
2977 if (test_bit(ExternalBbl, &flags))
2978 len += sprintf(page+len, "external_bbl%s", sep);
NeilBrown688834e2016-11-18 16:16:11 +11002979 if (test_bit(FailFast, &flags))
2980 len += sprintf(page+len, "failfast%s", sep);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002981
2982 if (len)
2983 len -= strlen(sep);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002984
NeilBrown86e6ffd2005-11-08 21:39:24 -08002985 return len+sprintf(page+len, "\n");
2986}
2987
NeilBrown45dc2de2006-06-26 00:27:58 -07002988static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002989state_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown45dc2de2006-06-26 00:27:58 -07002990{
2991 /* can write
NeilBrownde393cd2011-07-28 11:31:48 +10002992 * faulty - simulates an error
NeilBrown45dc2de2006-06-26 00:27:58 -07002993 * remove - disconnects the device
NeilBrownf6556752006-06-26 00:28:01 -07002994 * writemostly - sets write_mostly
2995 * -writemostly - clears write_mostly
NeilBrownde393cd2011-07-28 11:31:48 +10002996 * blocked - sets the Blocked flags
2997 * -blocked - clears the Blocked and possibly simulates an error
NeilBrown6d56e272009-04-14 12:01:57 +10002998 * insync - sets Insync providing device isn't active
NeilBrownf4667222013-12-09 12:04:56 +11002999 * -insync - clear Insync for a device with a slot assigned,
3000 * so that it gets rebuilt based on bitmap
NeilBrownd7a9d442011-07-28 11:31:48 +10003001 * write_error - sets WriteErrorSeen
3002 * -write_error - clears WriteErrorSeen
NeilBrown688834e2016-11-18 16:16:11 +11003003 * {,-}failfast - set/clear FailFast
NeilBrown45dc2de2006-06-26 00:27:58 -07003004 */
3005 int err = -EINVAL;
3006 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3007 md_error(rdev->mddev, rdev);
NeilBrown5ef56c82011-08-25 14:42:51 +10003008 if (test_bit(Faulty, &rdev->flags))
3009 err = 0;
3010 else
3011 err = -EBUSY;
NeilBrown45dc2de2006-06-26 00:27:58 -07003012 } else if (cmd_match(buf, "remove")) {
Shaohua Li5d881782016-07-28 09:06:34 -07003013 if (rdev->mddev->pers) {
3014 clear_bit(Blocked, &rdev->flags);
3015 remove_and_add_spares(rdev->mddev, rdev);
3016 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003017 if (rdev->raid_disk >= 0)
3018 err = -EBUSY;
3019 else {
NeilBrownfd01b882011-10-11 16:47:53 +11003020 struct mddev *mddev = rdev->mddev;
NeilBrown45dc2de2006-06-26 00:27:58 -07003021 err = 0;
Guoqing Jianga9720902015-10-12 17:21:27 +08003022 if (mddev_is_clustered(mddev))
3023 err = md_cluster_ops->remove_disk(mddev, rdev);
3024
3025 if (err == 0) {
3026 md_kick_rdev_from_array(rdev);
NeilBrown060b0682016-11-04 16:46:03 +11003027 if (mddev->pers) {
Shaohua Li29530792016-12-08 15:48:19 -08003028 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11003029 md_wakeup_thread(mddev->thread);
3030 }
Guoqing Jianga9720902015-10-12 17:21:27 +08003031 md_new_event(mddev);
3032 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003033 }
NeilBrownf6556752006-06-26 00:28:01 -07003034 } else if (cmd_match(buf, "writemostly")) {
3035 set_bit(WriteMostly, &rdev->flags);
Guoqing Jiang404659c2019-12-23 10:48:53 +01003036 mddev_create_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003037 err = 0;
3038 } else if (cmd_match(buf, "-writemostly")) {
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01003039 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003040 clear_bit(WriteMostly, &rdev->flags);
3041 err = 0;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003042 } else if (cmd_match(buf, "blocked")) {
3043 set_bit(Blocked, &rdev->flags);
3044 err = 0;
3045 } else if (cmd_match(buf, "-blocked")) {
NeilBrownde393cd2011-07-28 11:31:48 +10003046 if (!test_bit(Faulty, &rdev->flags) &&
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02003047 !test_bit(ExternalBbl, &rdev->flags) &&
NeilBrown7da64a02011-08-30 16:20:17 +10003048 rdev->badblocks.unacked_exist) {
NeilBrownde393cd2011-07-28 11:31:48 +10003049 /* metadata handler doesn't understand badblocks,
3050 * so we need to fail the device
3051 */
3052 md_error(rdev->mddev, rdev);
3053 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003054 clear_bit(Blocked, &rdev->flags);
NeilBrownde393cd2011-07-28 11:31:48 +10003055 clear_bit(BlockedBadBlocks, &rdev->flags);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003056 wake_up(&rdev->blocked_wait);
3057 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3058 md_wakeup_thread(rdev->mddev->thread);
3059
3060 err = 0;
NeilBrown6d56e272009-04-14 12:01:57 +10003061 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3062 set_bit(In_sync, &rdev->flags);
3063 err = 0;
NeilBrown688834e2016-11-18 16:16:11 +11003064 } else if (cmd_match(buf, "failfast")) {
3065 set_bit(FailFast, &rdev->flags);
3066 err = 0;
3067 } else if (cmd_match(buf, "-failfast")) {
3068 clear_bit(FailFast, &rdev->flags);
3069 err = 0;
Shaohua Lif2076e72015-10-08 21:54:12 -07003070 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3071 !test_bit(Journal, &rdev->flags)) {
NeilBrowne1960f82014-09-30 15:24:25 +10003072 if (rdev->mddev->pers == NULL) {
3073 clear_bit(In_sync, &rdev->flags);
3074 rdev->saved_raid_disk = rdev->raid_disk;
3075 rdev->raid_disk = -1;
3076 err = 0;
3077 }
NeilBrownd7a9d442011-07-28 11:31:48 +10003078 } else if (cmd_match(buf, "write_error")) {
3079 set_bit(WriteErrorSeen, &rdev->flags);
3080 err = 0;
3081 } else if (cmd_match(buf, "-write_error")) {
3082 clear_bit(WriteErrorSeen, &rdev->flags);
3083 err = 0;
NeilBrown2d78f8c2011-12-23 10:17:51 +11003084 } else if (cmd_match(buf, "want_replacement")) {
3085 /* Any non-spare device that is not a replacement can
3086 * become want_replacement at any time, but we then need to
3087 * check if recovery is needed.
3088 */
3089 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07003090 !test_bit(Journal, &rdev->flags) &&
NeilBrown2d78f8c2011-12-23 10:17:51 +11003091 !test_bit(Replacement, &rdev->flags))
3092 set_bit(WantReplacement, &rdev->flags);
3093 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3094 md_wakeup_thread(rdev->mddev->thread);
3095 err = 0;
3096 } else if (cmd_match(buf, "-want_replacement")) {
3097 /* Clearing 'want_replacement' is always allowed.
3098 * Once replacements starts it is too late though.
3099 */
3100 err = 0;
3101 clear_bit(WantReplacement, &rdev->flags);
3102 } else if (cmd_match(buf, "replacement")) {
3103 /* Can only set a device as a replacement when array has not
3104 * yet been started. Once running, replacement is automatic
3105 * from spares, or by assigning 'slot'.
3106 */
3107 if (rdev->mddev->pers)
3108 err = -EBUSY;
3109 else {
3110 set_bit(Replacement, &rdev->flags);
3111 err = 0;
3112 }
3113 } else if (cmd_match(buf, "-replacement")) {
3114 /* Similarly, can only clear Replacement before start */
3115 if (rdev->mddev->pers)
3116 err = -EBUSY;
3117 else {
3118 clear_bit(Replacement, &rdev->flags);
3119 err = 0;
3120 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003121 } else if (cmd_match(buf, "re-add")) {
Yufen Yuee37e622019-04-02 14:22:14 +08003122 if (!rdev->mddev->pers)
3123 err = -EINVAL;
3124 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3125 rdev->saved_raid_disk >= 0) {
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05003126 /* clear_bit is performed _after_ all the devices
3127 * have their local Faulty bit cleared. If any writes
3128 * happen in the meantime in the local node, they
3129 * will land in the local bitmap, which will be synced
3130 * by this node eventually
3131 */
3132 if (!mddev_is_clustered(rdev->mddev) ||
3133 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3134 clear_bit(Faulty, &rdev->flags);
3135 err = add_bound_rdev(rdev);
3136 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003137 } else
3138 err = -EBUSY;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02003139 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3140 set_bit(ExternalBbl, &rdev->flags);
3141 rdev->badblocks.shift = 0;
3142 err = 0;
3143 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3144 clear_bit(ExternalBbl, &rdev->flags);
3145 err = 0;
NeilBrown45dc2de2006-06-26 00:27:58 -07003146 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10003147 if (!err)
3148 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrown45dc2de2006-06-26 00:27:58 -07003149 return err ? err : len;
3150}
NeilBrown80ca3a42006-07-10 04:44:18 -07003151static struct rdev_sysfs_entry rdev_state =
NeilBrown750f1992014-09-30 08:53:05 +10003152__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003153
3154static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003155errors_show(struct md_rdev *rdev, char *page)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003156{
3157 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3158}
3159
3160static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003161errors_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003162{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003163 unsigned int n;
3164 int rv;
3165
3166 rv = kstrtouint(buf, 10, &n);
3167 if (rv < 0)
3168 return rv;
3169 atomic_set(&rdev->corrected_errors, n);
3170 return len;
NeilBrown4dbcdc72006-01-06 00:20:52 -08003171}
3172static struct rdev_sysfs_entry rdev_errors =
NeilBrown80ca3a42006-07-10 04:44:18 -07003173__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
NeilBrown4dbcdc72006-01-06 00:20:52 -08003174
NeilBrown014236d2006-01-06 00:20:55 -08003175static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003176slot_show(struct md_rdev *rdev, char *page)
NeilBrown014236d2006-01-06 00:20:55 -08003177{
Shaohua Lif2076e72015-10-08 21:54:12 -07003178 if (test_bit(Journal, &rdev->flags))
3179 return sprintf(page, "journal\n");
3180 else if (rdev->raid_disk < 0)
NeilBrown014236d2006-01-06 00:20:55 -08003181 return sprintf(page, "none\n");
3182 else
3183 return sprintf(page, "%d\n", rdev->raid_disk);
3184}
3185
3186static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003187slot_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown014236d2006-01-06 00:20:55 -08003188{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003189 int slot;
NeilBrownc303da62008-02-06 01:39:51 -08003190 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003191
Shaohua Lif2076e72015-10-08 21:54:12 -07003192 if (test_bit(Journal, &rdev->flags))
3193 return -EBUSY;
NeilBrown014236d2006-01-06 00:20:55 -08003194 if (strncmp(buf, "none", 4)==0)
3195 slot = -1;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003196 else {
3197 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3198 if (err < 0)
3199 return err;
3200 }
Neil Brown6c2fce22008-06-28 08:31:31 +10003201 if (rdev->mddev->pers && slot == -1) {
NeilBrownc303da62008-02-06 01:39:51 -08003202 /* Setting 'slot' on an active array requires also
3203 * updating the 'rd%d' link, and communicating
3204 * with the personality with ->hot_*_disk.
3205 * For now we only support removing
3206 * failed/spare devices. This normally happens automatically,
3207 * but not when the metadata is externally managed.
3208 */
NeilBrownc303da62008-02-06 01:39:51 -08003209 if (rdev->raid_disk == -1)
3210 return -EEXIST;
3211 /* personality does all needed checks */
Namhyung Kim01393f32011-06-09 11:42:54 +10003212 if (rdev->mddev->pers->hot_remove_disk == NULL)
NeilBrownc303da62008-02-06 01:39:51 -08003213 return -EINVAL;
NeilBrown746d3202013-04-24 11:42:41 +10003214 clear_bit(Blocked, &rdev->flags);
3215 remove_and_add_spares(rdev->mddev, rdev);
3216 if (rdev->raid_disk >= 0)
3217 return -EBUSY;
NeilBrownc303da62008-02-06 01:39:51 -08003218 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3219 md_wakeup_thread(rdev->mddev->thread);
Neil Brown6c2fce22008-06-28 08:31:31 +10003220 } else if (rdev->mddev->pers) {
Neil Brown6c2fce22008-06-28 08:31:31 +10003221 /* Activating a spare .. or possibly reactivating
NeilBrown6d56e272009-04-14 12:01:57 +10003222 * if we ever get bitmaps working here.
Neil Brown6c2fce22008-06-28 08:31:31 +10003223 */
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003224 int err;
Neil Brown6c2fce22008-06-28 08:31:31 +10003225
3226 if (rdev->raid_disk != -1)
3227 return -EBUSY;
3228
NeilBrownc6751b22011-02-02 11:57:13 +11003229 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3230 return -EBUSY;
3231
Neil Brown6c2fce22008-06-28 08:31:31 +10003232 if (rdev->mddev->pers->hot_add_disk == NULL)
3233 return -EINVAL;
3234
NeilBrownba1b41b2011-01-14 09:14:34 +11003235 if (slot >= rdev->mddev->raid_disks &&
3236 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3237 return -ENOSPC;
3238
Neil Brown6c2fce22008-06-28 08:31:31 +10003239 rdev->raid_disk = slot;
3240 if (test_bit(In_sync, &rdev->flags))
3241 rdev->saved_raid_disk = slot;
3242 else
3243 rdev->saved_raid_disk = -1;
NeilBrownd30519f2011-10-18 12:13:47 +11003244 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11003245 clear_bit(Bitmap_sync, &rdev->flags);
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02003246 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003247 if (err) {
3248 rdev->raid_disk = -1;
3249 return err;
3250 } else
3251 sysfs_notify_dirent_safe(rdev->sysfs_state);
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09003252		/* failure here is OK */
3253 sysfs_link_rdev(rdev->mddev, rdev);
Neil Brown6c2fce22008-06-28 08:31:31 +10003254 /* don't wakeup anyone, leave that to userspace. */
NeilBrownc303da62008-02-06 01:39:51 -08003255 } else {
NeilBrownba1b41b2011-01-14 09:14:34 +11003256 if (slot >= rdev->mddev->raid_disks &&
3257 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
NeilBrownc303da62008-02-06 01:39:51 -08003258 return -ENOSPC;
3259 rdev->raid_disk = slot;
3260 /* assume it is working */
NeilBrownc5d79ad2008-02-06 01:39:54 -08003261 clear_bit(Faulty, &rdev->flags);
3262 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc303da62008-02-06 01:39:51 -08003263 set_bit(In_sync, &rdev->flags);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003264 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrownc303da62008-02-06 01:39:51 -08003265 }
NeilBrown014236d2006-01-06 00:20:55 -08003266 return len;
3267}
3268
NeilBrown014236d2006-01-06 00:20:55 -08003269static struct rdev_sysfs_entry rdev_slot =
NeilBrown80ca3a42006-07-10 04:44:18 -07003270__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
NeilBrown014236d2006-01-06 00:20:55 -08003271
NeilBrown93c8cad2006-01-06 00:20:56 -08003272static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003273offset_show(struct md_rdev *rdev, char *page)
NeilBrown93c8cad2006-01-06 00:20:56 -08003274{
Andrew Morton6961ece2006-01-06 00:20:59 -08003275 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
NeilBrown93c8cad2006-01-06 00:20:56 -08003276}
3277
3278static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003279offset_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown93c8cad2006-01-06 00:20:56 -08003280{
NeilBrownc6563a82012-05-21 09:27:00 +10003281 unsigned long long offset;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003282 if (kstrtoull(buf, 10, &offset) < 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003283 return -EINVAL;
Neil Brown8ed0a522008-06-28 08:31:29 +10003284 if (rdev->mddev->pers && rdev->raid_disk >= 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003285 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003286 if (rdev->sectors && rdev->mddev->external)
NeilBrownc5d79ad2008-02-06 01:39:54 -08003287 /* Must set offset before size, so overlap checks
3288 * can be sane */
3289 return -EBUSY;
NeilBrown93c8cad2006-01-06 00:20:56 -08003290 rdev->data_offset = offset;
NeilBrown25f7fd42012-07-19 15:59:18 +10003291 rdev->new_data_offset = offset;
NeilBrown93c8cad2006-01-06 00:20:56 -08003292 return len;
3293}
3294
3295static struct rdev_sysfs_entry rdev_offset =
NeilBrown80ca3a42006-07-10 04:44:18 -07003296__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
NeilBrown93c8cad2006-01-06 00:20:56 -08003297
NeilBrownc6563a82012-05-21 09:27:00 +10003298static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3299{
3300 return sprintf(page, "%llu\n",
3301 (unsigned long long)rdev->new_data_offset);
3302}
3303
3304static ssize_t new_offset_store(struct md_rdev *rdev,
3305 const char *buf, size_t len)
3306{
3307 unsigned long long new_offset;
3308 struct mddev *mddev = rdev->mddev;
3309
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003310 if (kstrtoull(buf, 10, &new_offset) < 0)
NeilBrownc6563a82012-05-21 09:27:00 +10003311 return -EINVAL;
3312
NeilBrownf851b602014-12-11 10:02:10 +11003313 if (mddev->sync_thread ||
3314 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
NeilBrownc6563a82012-05-21 09:27:00 +10003315 return -EBUSY;
3316 if (new_offset == rdev->data_offset)
3317 /* reset is always permitted */
3318 ;
3319 else if (new_offset > rdev->data_offset) {
3320 /* must not push array size beyond rdev_sectors */
3321 if (new_offset - rdev->data_offset
3322 + mddev->dev_sectors > rdev->sectors)
3323 return -E2BIG;
3324 }
3325 /* Metadata worries about other space details. */
3326
3327 /* decreasing the offset is inconsistent with a backwards
3328 * reshape.
3329 */
3330 if (new_offset < rdev->data_offset &&
3331 mddev->reshape_backwards)
3332 return -EINVAL;
3333 /* Increasing offset is inconsistent with forwards
3334 * reshape. reshape_direction should be set to
3335 * 'backwards' first.
3336 */
3337 if (new_offset > rdev->data_offset &&
3338 !mddev->reshape_backwards)
3339 return -EINVAL;
3340
3341 if (mddev->pers && mddev->persistent &&
3342 !super_types[mddev->major_version]
3343 .allow_new_offset(rdev, new_offset))
3344 return -E2BIG;
3345 rdev->new_data_offset = new_offset;
3346 if (new_offset > rdev->data_offset)
3347 mddev->reshape_backwards = 1;
3348 else if (new_offset < rdev->data_offset)
3349 mddev->reshape_backwards = 0;
3350
3351 return len;
3352}
3353static struct rdev_sysfs_entry rdev_new_offset =
3354__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3355
NeilBrown83303b62006-01-06 00:21:06 -08003356static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003357rdev_size_show(struct md_rdev *rdev, char *page)
NeilBrown83303b62006-01-06 00:21:06 -08003358{
Andre Nolldd8ac332009-03-31 14:33:13 +11003359 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
NeilBrown83303b62006-01-06 00:21:06 -08003360}
3361
NeilBrownc5d79ad2008-02-06 01:39:54 -08003362static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3363{
3364 /* check if two start/length pairs overlap */
3365 if (s1+l1 <= s2)
3366 return 0;
3367 if (s2+l2 <= s1)
3368 return 0;
3369 return 1;
3370}
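
/*
 * overlaps() treats each pair as a half-open range [s, s+l): for
 * example overlaps(0, 10, 5, 10) returns 1, while the merely adjacent
 * overlaps(0, 5, 5, 10) returns 0.
 */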
3371
Dan Williamsb522adc2009-03-31 15:00:31 +11003372static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3373{
3374 unsigned long long blocks;
3375 sector_t new;
3376
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003377 if (kstrtoull(buf, 10, &blocks) < 0)
Dan Williamsb522adc2009-03-31 15:00:31 +11003378 return -EINVAL;
3379
3380 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3381 return -EINVAL; /* sector conversion overflow */
3382
3383 new = blocks * 2;
3384 if (new != blocks * 2)
3385 return -EINVAL; /* unsigned long long to sector_t overflow */
3386
3387 *sectors = new;
3388 return 0;
3389}
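
/*
 * strict_blocks_to_sectors() parses a size given in 1K blocks and
 * converts it to 512-byte sectors, e.g. "100" yields 200 sectors; the
 * two checks above reject values whose doubling would overflow either
 * unsigned long long or sector_t.
 */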
3390
NeilBrown83303b62006-01-06 00:21:06 -08003391static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003392rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown83303b62006-01-06 00:21:06 -08003393{
NeilBrownfd01b882011-10-11 16:47:53 +11003394 struct mddev *my_mddev = rdev->mddev;
Andre Nolldd8ac332009-03-31 14:33:13 +11003395 sector_t oldsectors = rdev->sectors;
Dan Williamsb522adc2009-03-31 15:00:31 +11003396 sector_t sectors;
NeilBrown27c529b2008-03-04 14:29:33 -08003397
Shaohua Lif2076e72015-10-08 21:54:12 -07003398 if (test_bit(Journal, &rdev->flags))
3399 return -EBUSY;
Dan Williamsb522adc2009-03-31 15:00:31 +11003400 if (strict_blocks_to_sectors(buf, &sectors) < 0)
Neil Brownd7027452008-07-12 10:37:50 +10003401 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10003402 if (rdev->data_offset != rdev->new_data_offset)
3403 return -EINVAL; /* too confusing */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003404 if (my_mddev->pers && rdev->raid_disk >= 0) {
Neil Brownd7027452008-07-12 10:37:50 +10003405 if (my_mddev->persistent) {
Andre Nolldd8ac332009-03-31 14:33:13 +11003406 sectors = super_types[my_mddev->major_version].
3407 rdev_size_change(rdev, sectors);
3408 if (!sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10003409 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003410 } else if (!sectors)
Mike Snitzer77304d22010-11-08 14:39:12 +01003411 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
Andre Nolldd8ac332009-03-31 14:33:13 +11003412 rdev->data_offset;
NeilBrowna6468532013-02-21 14:33:17 +11003413 if (!my_mddev->pers->resize)
3414 /* Cannot change size for RAID0 or Linear etc */
3415 return -EINVAL;
Chris Webb0cd17fe2008-06-28 08:31:46 +10003416 }
Andre Nolldd8ac332009-03-31 14:33:13 +11003417 if (sectors < my_mddev->dev_sectors)
Chris Webb7d3c6f82008-10-13 11:55:11 +11003418 return -EINVAL; /* component must fit device */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003419
Andre Nolldd8ac332009-03-31 14:33:13 +11003420 rdev->sectors = sectors;
3421 if (sectors > oldsectors && my_mddev->external) {
NeilBrown8b1afc32014-09-29 15:33:20 +10003422 /* Need to check that all other rdevs with the same
3423 * ->bdev do not overlap. 'rcu' is sufficient to walk
3424 * the rdev lists safely.
3425 * This check does not provide a hard guarantee, it
3426 * just helps avoid dangerous mistakes.
NeilBrownc5d79ad2008-02-06 01:39:54 -08003427 */
NeilBrownfd01b882011-10-11 16:47:53 +11003428 struct mddev *mddev;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003429 int overlap = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11003430 struct list_head *tmp;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003431
NeilBrown8b1afc32014-09-29 15:33:20 +10003432 rcu_read_lock();
NeilBrown29ac4aa2008-02-06 01:39:58 -08003433 for_each_mddev(mddev, tmp) {
NeilBrown3cb03002011-10-11 16:45:26 +11003434 struct md_rdev *rdev2;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003435
NeilBrowndafb20f2012-03-19 12:46:39 +11003436 rdev_for_each(rdev2, mddev)
NeilBrownf21e9ff2011-01-31 12:10:09 +11003437 if (rdev->bdev == rdev2->bdev &&
3438 rdev != rdev2 &&
3439 overlaps(rdev->data_offset, rdev->sectors,
3440 rdev2->data_offset,
3441 rdev2->sectors)) {
NeilBrownc5d79ad2008-02-06 01:39:54 -08003442 overlap = 1;
3443 break;
3444 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08003445 if (overlap) {
3446 mddev_put(mddev);
3447 break;
3448 }
3449 }
NeilBrown8b1afc32014-09-29 15:33:20 +10003450 rcu_read_unlock();
NeilBrownc5d79ad2008-02-06 01:39:54 -08003451 if (overlap) {
3452 /* Someone else could have slipped in a size
3453 * change here, but doing so is just silly.
Andre Nolldd8ac332009-03-31 14:33:13 +11003454 * We put oldsectors back because we *know* it is
NeilBrownc5d79ad2008-02-06 01:39:54 -08003455 * safe, and trust userspace not to race with
3456 * itself
3457 */
Andre Nolldd8ac332009-03-31 14:33:13 +11003458 rdev->sectors = oldsectors;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003459 return -EBUSY;
3460 }
3461 }
NeilBrown83303b62006-01-06 00:21:06 -08003462 return len;
3463}
3464
3465static struct rdev_sysfs_entry rdev_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07003466__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
NeilBrown83303b62006-01-06 00:21:06 -08003467
NeilBrown3cb03002011-10-11 16:45:26 +11003468static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
Dan Williams06e3c812009-12-12 21:17:12 -07003469{
3470 unsigned long long recovery_start = rdev->recovery_offset;
3471
3472 if (test_bit(In_sync, &rdev->flags) ||
3473 recovery_start == MaxSector)
3474 return sprintf(page, "none\n");
3475
3476 return sprintf(page, "%llu\n", recovery_start);
3477}
3478
NeilBrown3cb03002011-10-11 16:45:26 +11003479static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
Dan Williams06e3c812009-12-12 21:17:12 -07003480{
3481 unsigned long long recovery_start;
3482
3483 if (cmd_match(buf, "none"))
3484 recovery_start = MaxSector;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003485 else if (kstrtoull(buf, 10, &recovery_start))
Dan Williams06e3c812009-12-12 21:17:12 -07003486 return -EINVAL;
3487
3488 if (rdev->mddev->pers &&
3489 rdev->raid_disk >= 0)
3490 return -EBUSY;
3491
3492 rdev->recovery_offset = recovery_start;
3493 if (recovery_start == MaxSector)
3494 set_bit(In_sync, &rdev->flags);
3495 else
3496 clear_bit(In_sync, &rdev->flags);
3497 return len;
3498}
3499
3500static struct rdev_sysfs_entry rdev_recovery_start =
3501__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
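/*
 * Illustrative usage (assuming the usual /sys/block/mdX/md/dev-YYY/
 * layout), accepted only while the device is not active in a running
 * array:
 *	echo none > /sys/block/md0/md/dev-sdb1/recovery_start
 * marks the device fully recovered (In_sync); writing a sector number
 * instead records that recovery has only reached that point.
 */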
3502
Vishal Vermafc974ee2015-12-24 19:20:34 -07003503/* sysfs access to the bad-blocks list.
 3504 * We present two files.
 3505 * 'bad_blocks' lists sector numbers and lengths of ranges that
 3506 * are recorded as bad. The list is truncated to fit within
 3507 * the one-page limit of sysfs.
 3508 * Writing "sector length" to this file adds an acknowledged
 3509 * bad block to the list.
 3510 * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
 3511 * been acknowledged. Writing to this file adds bad blocks
 3512 * without acknowledging them. This is largely for testing.
 3513 */
NeilBrown3cb03002011-10-11 16:45:26 +11003514static ssize_t bb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003515{
3516 return badblocks_show(&rdev->badblocks, page, 0);
3517}
NeilBrown3cb03002011-10-11 16:45:26 +11003518static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003519{
NeilBrownde393cd2011-07-28 11:31:48 +10003520 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3521 /* Maybe that ack was all we needed */
3522 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3523 wake_up(&rdev->blocked_wait);
3524 return rv;
NeilBrown16c791a2011-07-28 11:31:47 +10003525}
3526static struct rdev_sysfs_entry rdev_bad_blocks =
3527__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3528
NeilBrown3cb03002011-10-11 16:45:26 +11003529static ssize_t ubb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003530{
3531 return badblocks_show(&rdev->badblocks, page, 1);
3532}
NeilBrown3cb03002011-10-11 16:45:26 +11003533static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003534{
3535 return badblocks_store(&rdev->badblocks, page, len, 1);
3536}
3537static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3538__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
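/*
 * Illustrative usage of the two files above (assuming the usual
 * /sys/block/mdX/md/dev-YYY/ layout):
 *	echo "2100 8" > /sys/block/md0/md/dev-sdb1/bad_blocks
 * records an acknowledged 8-sector bad range starting at sector 2100;
 * writing the same string to unacknowledged_bad_blocks records it
 * without the acknowledged flag, which is mainly useful for testing.
 */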
3539
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003540static ssize_t
3541ppl_sector_show(struct md_rdev *rdev, char *page)
3542{
3543 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3544}
3545
3546static ssize_t
3547ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3548{
3549 unsigned long long sector;
3550
3551 if (kstrtoull(buf, 10, &sector) < 0)
3552 return -EINVAL;
3553 if (sector != (sector_t)sector)
3554 return -EINVAL;
3555
3556 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3557 rdev->raid_disk >= 0)
3558 return -EBUSY;
3559
3560 if (rdev->mddev->persistent) {
3561 if (rdev->mddev->major_version == 0)
3562 return -EINVAL;
3563 if ((sector > rdev->sb_start &&
3564 sector - rdev->sb_start > S16_MAX) ||
3565 (sector < rdev->sb_start &&
3566 rdev->sb_start - sector > -S16_MIN))
3567 return -EINVAL;
3568 rdev->ppl.offset = sector - rdev->sb_start;
3569 } else if (!rdev->mddev->external) {
3570 return -EBUSY;
3571 }
3572 rdev->ppl.sector = sector;
3573 return len;
3574}
3575
3576static struct rdev_sysfs_entry rdev_ppl_sector =
3577__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3578
3579static ssize_t
3580ppl_size_show(struct md_rdev *rdev, char *page)
3581{
3582 return sprintf(page, "%u\n", rdev->ppl.size);
3583}
3584
3585static ssize_t
3586ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3587{
3588 unsigned int size;
3589
3590 if (kstrtouint(buf, 10, &size) < 0)
3591 return -EINVAL;
3592
3593 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3594 rdev->raid_disk >= 0)
3595 return -EBUSY;
3596
3597 if (rdev->mddev->persistent) {
3598 if (rdev->mddev->major_version == 0)
3599 return -EINVAL;
3600 if (size > U16_MAX)
3601 return -EINVAL;
3602 } else if (!rdev->mddev->external) {
3603 return -EBUSY;
3604 }
3605 rdev->ppl.size = size;
3606 return len;
3607}
3608
3609static struct rdev_sysfs_entry rdev_ppl_size =
3610__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3611
NeilBrown86e6ffd2005-11-08 21:39:24 -08003612static struct attribute *rdev_default_attrs[] = {
3613 &rdev_state.attr,
NeilBrown4dbcdc72006-01-06 00:20:52 -08003614 &rdev_errors.attr,
NeilBrown014236d2006-01-06 00:20:55 -08003615 &rdev_slot.attr,
NeilBrown93c8cad2006-01-06 00:20:56 -08003616 &rdev_offset.attr,
NeilBrownc6563a82012-05-21 09:27:00 +10003617 &rdev_new_offset.attr,
NeilBrown83303b62006-01-06 00:21:06 -08003618 &rdev_size.attr,
Dan Williams06e3c812009-12-12 21:17:12 -07003619 &rdev_recovery_start.attr,
NeilBrown16c791a2011-07-28 11:31:47 +10003620 &rdev_bad_blocks.attr,
3621 &rdev_unack_bad_blocks.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003622 &rdev_ppl_sector.attr,
3623 &rdev_ppl_size.attr,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003624 NULL,
3625};
3626static ssize_t
3627rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3628{
3629 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003630 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003631
3632 if (!entry->show)
3633 return -EIO;
NeilBrown758bfc82014-12-15 12:56:59 +11003634 if (!rdev->mddev)
Marcos Paulo de Souza168b3052019-06-14 15:41:06 -07003635 return -ENODEV;
NeilBrown758bfc82014-12-15 12:56:59 +11003636 return entry->show(rdev, page);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003637}
3638
3639static ssize_t
3640rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3641 const char *page, size_t length)
3642{
3643 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003644 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown27c529b2008-03-04 14:29:33 -08003645 ssize_t rv;
NeilBrownfd01b882011-10-11 16:47:53 +11003646 struct mddev *mddev = rdev->mddev;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003647
3648 if (!entry->store)
3649 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07003650 if (!capable(CAP_SYS_ADMIN))
3651 return -EACCES;
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003652 rv = mddev ? mddev_lock(mddev) : -ENODEV;
NeilBrownca388052008-02-06 01:39:55 -08003653 if (!rv) {
NeilBrown27c529b2008-03-04 14:29:33 -08003654 if (rdev->mddev == NULL)
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003655 rv = -ENODEV;
NeilBrown27c529b2008-03-04 14:29:33 -08003656 else
3657 rv = entry->store(rdev, page, length);
Dan Williams6a518302008-04-30 00:52:28 -07003658 mddev_unlock(mddev);
NeilBrownca388052008-02-06 01:39:55 -08003659 }
3660 return rv;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003661}
3662
3663static void rdev_free(struct kobject *ko)
3664{
NeilBrown3cb03002011-10-11 16:45:26 +11003665 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003666 kfree(rdev);
3667}
Emese Revfy52cf25d2010-01-19 02:58:23 +01003668static const struct sysfs_ops rdev_sysfs_ops = {
NeilBrown86e6ffd2005-11-08 21:39:24 -08003669 .show = rdev_attr_show,
3670 .store = rdev_attr_store,
3671};
3672static struct kobj_type rdev_ktype = {
3673 .release = rdev_free,
3674 .sysfs_ops = &rdev_sysfs_ops,
3675 .default_attrs = rdev_default_attrs,
3676};
3677
NeilBrown3cb03002011-10-11 16:45:26 +11003678int md_rdev_init(struct md_rdev *rdev)
NeilBrowne8bb9a82010-06-01 19:37:26 +10003679{
3680 rdev->desc_nr = -1;
3681 rdev->saved_raid_disk = -1;
3682 rdev->raid_disk = -1;
3683 rdev->flags = 0;
3684 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10003685 rdev->new_data_offset = 0;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003686 rdev->sb_events = 0;
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02003687 rdev->last_read_error = 0;
NeilBrown2699b672011-07-28 11:31:47 +10003688 rdev->sb_loaded = 0;
3689 rdev->bb_page = NULL;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003690 atomic_set(&rdev->nr_pending, 0);
3691 atomic_set(&rdev->read_errors, 0);
3692 atomic_set(&rdev->corrected_errors, 0);
3693
3694 INIT_LIST_HEAD(&rdev->same_set);
3695 init_waitqueue_head(&rdev->blocked_wait);
NeilBrown2230dfe2011-07-28 11:31:46 +10003696
3697 /* Add space to store bad block list.
3698 * This reserves the space even on arrays where it cannot
3699 * be used - I wonder if that matters
3700 */
Vishal Vermafc974ee2015-12-24 19:20:34 -07003701 return badblocks_init(&rdev->badblocks, 0);
NeilBrowne8bb9a82010-06-01 19:37:26 +10003702}
3703EXPORT_SYMBOL_GPL(md_rdev_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704/*
3705 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3706 *
3707 * mark the device faulty if:
3708 *
3709 * - the device is nonexistent (zero size)
3710 * - the device has no valid superblock
3711 *
3712 * a faulty rdev _never_ has rdev->sb set.
3713 */
NeilBrown3cb03002011-10-11 16:45:26 +11003714static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715{
3716 char b[BDEVNAME_SIZE];
3717 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11003718 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 sector_t size;
3720
NeilBrown9ffae0c2006-01-06 00:20:32 -08003721 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
NeilBrown9d487392016-11-02 14:16:49 +11003722 if (!rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724
NeilBrown2230dfe2011-07-28 11:31:46 +10003725 err = md_rdev_init(rdev);
3726 if (err)
3727 goto abort_free;
3728 err = alloc_disk_sb(rdev);
3729 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730 goto abort_free;
3731
NeilBrownc5d79ad2008-02-06 01:39:54 -08003732 err = lock_rdev(rdev, newdev, super_format == -2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 if (err)
3734 goto abort_free;
3735
Greg Kroah-Hartmanf9cb0742007-12-17 23:05:35 -07003736 kobject_init(&rdev->kobj, &rdev_ktype);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003737
Mike Snitzer77304d22010-11-08 14:39:12 +01003738 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739 if (!size) {
NeilBrown9d487392016-11-02 14:16:49 +11003740 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 bdevname(rdev->bdev,b));
3742 err = -EINVAL;
3743 goto abort_free;
3744 }
3745
3746 if (super_format >= 0) {
3747 err = super_types[super_format].
3748 load_super(rdev, NULL, super_minor);
3749 if (err == -EINVAL) {
NeilBrown9d487392016-11-02 14:16:49 +11003750 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
NeilBrowndf968c42007-07-17 04:06:11 -07003751 bdevname(rdev->bdev,b),
NeilBrown9d487392016-11-02 14:16:49 +11003752 super_format, super_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753 goto abort_free;
3754 }
3755 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11003756 pr_warn("md: could not read %s's sb, not importing!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757 bdevname(rdev->bdev,b));
3758 goto abort_free;
3759 }
3760 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003761
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 return rdev;
3763
3764abort_free:
NeilBrown2699b672011-07-28 11:31:47 +10003765 if (rdev->bdev)
3766 unlock_rdev(rdev);
NeilBrown545c8792012-05-22 13:54:30 +10003767 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 kfree(rdev);
3769 return ERR_PTR(err);
3770}
3771
3772/*
3773 * Check a full RAID array for plausibility
3774 */
3775
Yufen Yu6a5cb532019-10-16 16:00:03 +08003776static int analyze_sbs(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777{
3778 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11003779 struct md_rdev *rdev, *freshest, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 char b[BDEVNAME_SIZE];
3781
3782 freshest = NULL;
NeilBrowndafb20f2012-03-19 12:46:39 +11003783 rdev_for_each_safe(rdev, tmp, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 switch (super_types[mddev->major_version].
3785 load_super(rdev, freshest, mddev->minor_version)) {
3786 case 1:
3787 freshest = rdev;
3788 break;
3789 case 0:
3790 break;
3791 default:
NeilBrown9d487392016-11-02 14:16:49 +11003792 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003794 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 }
3796
Yufen Yu6a5cb532019-10-16 16:00:03 +08003797 /* Cannot find a valid fresh disk */
3798 if (!freshest) {
3799 pr_warn("md: cannot find a valid disk\n");
3800 return -EINVAL;
3801 }
3802
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 super_types[mddev->major_version].
3804 validate_super(mddev, freshest);
3805
3806 i = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11003807 rdev_for_each_safe(rdev, tmp, mddev) {
NeilBrown233fca32010-04-14 17:02:09 +10003808 if (mddev->max_disks &&
3809 (rdev->desc_nr >= mddev->max_disks ||
3810 i > mddev->max_disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11003811 pr_warn("md: %s: %s: only %d devices permitted\n",
3812 mdname(mddev), bdevname(rdev->bdev, b),
3813 mddev->max_disks);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003814 md_kick_rdev_from_array(rdev);
NeilBrownde01dfa2009-02-06 18:02:46 +11003815 continue;
3816 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003817 if (rdev != freshest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003818 if (super_types[mddev->major_version].
3819 validate_super(mddev, rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003820 pr_warn("md: kicking non-fresh %s from array!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003822 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823 continue;
3824 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003825 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826 if (mddev->level == LEVEL_MULTIPATH) {
3827 rdev->desc_nr = i++;
3828 rdev->raid_disk = rdev->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08003829 set_bit(In_sync, &rdev->flags);
Shaohua Lif2076e72015-10-08 21:54:12 -07003830 } else if (rdev->raid_disk >=
3831 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3832 !test_bit(Journal, &rdev->flags)) {
NeilBrowna778b732007-05-23 13:58:10 -07003833 rdev->raid_disk = -1;
3834 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 }
3836 }
Yufen Yu6a5cb532019-10-16 16:00:03 +08003837
3838 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003839}
3840
NeilBrown72e02072009-12-14 12:49:55 +11003841/* Read a fixed-point number.
3842 * Numbers in sysfs attributes should be in "standard" units where
3843 * possible, so time should be in seconds.
NeilBrownf72ffdd2014-09-30 14:23:59 +10003844 * However, we internally use a much smaller unit such as
NeilBrown72e02072009-12-14 12:49:55 +11003845 * milliseconds or jiffies.
3846 * This function takes a decimal number with a possible fractional
3847 * component, and produces an integer which is the result of
 3848 * multiplying that number by 10^'scale', all without any
 3849 * floating-point arithmetic.
3850 */
3851int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3852{
3853 unsigned long result = 0;
3854 long decimals = -1;
3855 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3856 if (*cp == '.')
3857 decimals = 0;
3858 else if (decimals < scale) {
3859 unsigned int value;
3860 value = *cp - '0';
3861 result = result * 10 + value;
3862 if (decimals >= 0)
3863 decimals++;
3864 }
3865 cp++;
3866 }
3867 if (*cp == '\n')
3868 cp++;
3869 if (*cp)
3870 return -EINVAL;
3871 if (decimals < 0)
3872 decimals = 0;
Andy Shevchenkocf891602019-07-23 23:41:55 +03003873 *res = result * int_pow(10, scale - decimals);
NeilBrown72e02072009-12-14 12:49:55 +11003874 return 0;
3875}
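/*
 * Worked example (illustrative): with scale == 3 the input is read as
 * seconds and the result is milliseconds:
 *	strict_strtoul_scaled("1.5", &res, 3);	-> res == 1500
 *	strict_strtoul_scaled("2", &res, 3);	-> res == 2000
 * safe_delay_store() below uses this to turn a "seconds.fraction"
 * string into milliseconds before converting to jiffies.
 */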
3876
NeilBrowneae17012005-11-08 21:39:23 -08003877static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003878safe_delay_show(struct mddev *mddev, char *page)
NeilBrown16f17b32006-06-26 00:27:37 -07003879{
3880 int msec = (mddev->safemode_delay*1000)/HZ;
3881 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3882}
3883static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003884safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
NeilBrown16f17b32006-06-26 00:27:37 -07003885{
NeilBrown16f17b32006-06-26 00:27:37 -07003886 unsigned long msec;
Dan Williams97ce0a72008-09-24 22:48:19 -07003887
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003888 if (mddev_is_clustered(mddev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003889 pr_warn("md: Safemode is disabled for clustered mode\n");
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003890 return -EINVAL;
3891 }
3892
NeilBrown72e02072009-12-14 12:49:55 +11003893 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
NeilBrown16f17b32006-06-26 00:27:37 -07003894 return -EINVAL;
NeilBrown16f17b32006-06-26 00:27:37 -07003895 if (msec == 0)
3896 mddev->safemode_delay = 0;
3897 else {
NeilBrown19052c02008-08-05 15:54:13 +10003898 unsigned long old_delay = mddev->safemode_delay;
NeilBrown1b30e662014-12-15 12:57:00 +11003899 unsigned long new_delay = (msec*HZ)/1000;
3900
3901 if (new_delay == 0)
3902 new_delay = 1;
3903 mddev->safemode_delay = new_delay;
3904 if (new_delay < old_delay || old_delay == 0)
3905 mod_timer(&mddev->safemode_timer, jiffies+1);
NeilBrown16f17b32006-06-26 00:27:37 -07003906 }
3907 return len;
3908}
3909static struct md_sysfs_entry md_safe_delay =
NeilBrown80ca3a42006-07-10 04:44:18 -07003910__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
NeilBrown16f17b32006-06-26 00:27:37 -07003911
3912static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003913level_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08003914{
NeilBrown36d091f2014-12-15 12:56:58 +11003915 struct md_personality *p;
3916 int ret;
3917 spin_lock(&mddev->lock);
3918 p = mddev->pers;
NeilBrownd9d166c2006-01-06 00:20:51 -08003919 if (p)
NeilBrown36d091f2014-12-15 12:56:58 +11003920 ret = sprintf(page, "%s\n", p->name);
NeilBrownd9d166c2006-01-06 00:20:51 -08003921 else if (mddev->clevel[0])
NeilBrown36d091f2014-12-15 12:56:58 +11003922 ret = sprintf(page, "%s\n", mddev->clevel);
NeilBrownd9d166c2006-01-06 00:20:51 -08003923 else if (mddev->level != LEVEL_NONE)
NeilBrown36d091f2014-12-15 12:56:58 +11003924 ret = sprintf(page, "%d\n", mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08003925 else
NeilBrown36d091f2014-12-15 12:56:58 +11003926 ret = 0;
3927 spin_unlock(&mddev->lock);
3928 return ret;
NeilBrowneae17012005-11-08 21:39:23 -08003929}
3930
NeilBrownd9d166c2006-01-06 00:20:51 -08003931static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003932level_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd9d166c2006-01-06 00:20:51 -08003933{
Dan Williamsf2859af2010-05-02 10:04:16 -07003934 char clevel[16];
NeilBrown67918752014-12-15 12:57:01 +11003935 ssize_t rv;
3936 size_t slen = len;
NeilBrowndb721d32014-12-15 12:56:58 +11003937 struct md_personality *pers, *oldpers;
Dan Williamsf2859af2010-05-02 10:04:16 -07003938 long level;
NeilBrowndb721d32014-12-15 12:56:58 +11003939 void *priv, *oldpriv;
NeilBrown3cb03002011-10-11 16:45:26 +11003940 struct md_rdev *rdev;
NeilBrown245f46c2009-03-31 14:39:39 +11003941
NeilBrown67918752014-12-15 12:57:01 +11003942 if (slen == 0 || slen >= sizeof(clevel))
3943 return -EINVAL;
3944
3945 rv = mddev_lock(mddev);
3946 if (rv)
NeilBrown245f46c2009-03-31 14:39:39 +11003947 return rv;
NeilBrown67918752014-12-15 12:57:01 +11003948
3949 if (mddev->pers == NULL) {
3950 strncpy(mddev->clevel, buf, slen);
3951 if (mddev->clevel[slen-1] == '\n')
3952 slen--;
3953 mddev->clevel[slen] = 0;
3954 mddev->level = LEVEL_NONE;
3955 rv = len;
3956 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003957 }
NeilBrown67918752014-12-15 12:57:01 +11003958 rv = -EROFS;
NeilBrownbd8839e2014-05-28 13:39:21 +10003959 if (mddev->ro)
NeilBrown67918752014-12-15 12:57:01 +11003960 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003961
3962 /* request to change the personality. Need to ensure:
3963 * - array is not engaged in resync/recovery/reshape
3964 * - old personality can be suspended
3965 * - new personality will access other array.
3966 */
3967
NeilBrown67918752014-12-15 12:57:01 +11003968 rv = -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10003969 if (mddev->sync_thread ||
NeilBrownf851b602014-12-11 10:02:10 +11003970 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
NeilBrownbb4f1e92010-08-08 21:18:03 +10003971 mddev->reshape_position != MaxSector ||
3972 mddev->sysfs_active)
NeilBrown67918752014-12-15 12:57:01 +11003973 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003974
NeilBrown67918752014-12-15 12:57:01 +11003975 rv = -EINVAL;
NeilBrown245f46c2009-03-31 14:39:39 +11003976 if (!mddev->pers->quiesce) {
NeilBrown9d487392016-11-02 14:16:49 +11003977 pr_warn("md: %s: %s does not support online personality change\n",
3978 mdname(mddev), mddev->pers->name);
NeilBrown67918752014-12-15 12:57:01 +11003979 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003980 }
3981
3982 /* Now find the new personality */
NeilBrown67918752014-12-15 12:57:01 +11003983 strncpy(clevel, buf, slen);
3984 if (clevel[slen-1] == '\n')
3985 slen--;
3986 clevel[slen] = 0;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003987 if (kstrtol(clevel, 10, &level))
Dan Williamsf2859af2010-05-02 10:04:16 -07003988 level = LEVEL_NONE;
NeilBrown245f46c2009-03-31 14:39:39 +11003989
Dan Williamsf2859af2010-05-02 10:04:16 -07003990 if (request_module("md-%s", clevel) != 0)
3991 request_module("md-level-%s", clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003992 spin_lock(&pers_lock);
Dan Williamsf2859af2010-05-02 10:04:16 -07003993 pers = find_pers(level, clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003994 if (!pers || !try_module_get(pers->owner)) {
3995 spin_unlock(&pers_lock);
NeilBrown9d487392016-11-02 14:16:49 +11003996 pr_warn("md: personality %s not loaded\n", clevel);
NeilBrown67918752014-12-15 12:57:01 +11003997 rv = -EINVAL;
3998 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003999 }
4000 spin_unlock(&pers_lock);
4001
4002 if (pers == mddev->pers) {
4003 /* Nothing to do! */
4004 module_put(pers->owner);
NeilBrown67918752014-12-15 12:57:01 +11004005 rv = len;
4006 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004007 }
4008 if (!pers->takeover) {
4009 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004010 pr_warn("md: %s: %s does not support personality takeover\n",
4011 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004012 rv = -EINVAL;
4013 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004014 }
4015
NeilBrowndafb20f2012-03-19 12:46:39 +11004016 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01004017 rdev->new_raid_disk = rdev->raid_disk;
4018
NeilBrown245f46c2009-03-31 14:39:39 +11004019 /* ->takeover must set new_* and/or delta_disks
4020 * if it succeeds, and may set them when it fails.
4021 */
4022 priv = pers->takeover(mddev);
4023 if (IS_ERR(priv)) {
4024 mddev->new_level = mddev->level;
4025 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10004026 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown245f46c2009-03-31 14:39:39 +11004027 mddev->raid_disks -= mddev->delta_disks;
4028 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10004029 mddev->reshape_backwards = 0;
NeilBrown245f46c2009-03-31 14:39:39 +11004030 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004031 pr_warn("md: %s: %s would not accept array\n",
4032 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004033 rv = PTR_ERR(priv);
4034 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004035 }
4036
4037 /* Looks like we have a winner */
4038 mddev_suspend(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11004039 mddev_detach(mddev);
NeilBrown36d091f2014-12-15 12:56:58 +11004040
4041 spin_lock(&mddev->lock);
NeilBrowndb721d32014-12-15 12:56:58 +11004042 oldpers = mddev->pers;
4043 oldpriv = mddev->private;
4044 mddev->pers = pers;
4045 mddev->private = priv;
4046 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4047 mddev->level = mddev->new_level;
4048 mddev->layout = mddev->new_layout;
4049 mddev->chunk_sectors = mddev->new_chunk_sectors;
4050 mddev->delta_disks = 0;
4051 mddev->reshape_backwards = 0;
4052 mddev->degraded = 0;
NeilBrown36d091f2014-12-15 12:56:58 +11004053 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10004054
NeilBrowndb721d32014-12-15 12:56:58 +11004055 if (oldpers->sync_request == NULL &&
Trela Maciej54071b32010-03-08 16:02:42 +11004056 mddev->external) {
4057 /* We are converting from a no-redundancy array
4058 * to a redundancy array and metadata is managed
4059 * externally so we need to be sure that writes
4060 * won't block due to a need to transition
4061 * clean->dirty
4062 * until external management is started.
4063 */
4064 mddev->in_sync = 0;
4065 mddev->safemode_delay = 0;
4066 mddev->safemode = 0;
4067 }
4068
NeilBrowndb721d32014-12-15 12:56:58 +11004069 oldpers->free(mddev, oldpriv);
4070
4071 if (oldpers->sync_request == NULL &&
4072 pers->sync_request != NULL) {
4073 /* need to add the md_redundancy_group */
4074 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11004075 pr_warn("md: cannot register extra attributes for %s\n",
4076 mdname(mddev));
NeilBrowndb721d32014-12-15 12:56:58 +11004077 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07004078 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4079 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrowndb721d32014-12-15 12:56:58 +11004080 }
4081 if (oldpers->sync_request != NULL &&
4082 pers->sync_request == NULL) {
4083 /* need to remove the md_redundancy_group */
4084 if (mddev->to_remove == NULL)
4085 mddev->to_remove = &md_redundancy_group;
4086 }
4087
Alexey Obitotskiy4cb9da72016-06-23 12:11:01 +02004088 module_put(oldpers->owner);
4089
NeilBrowndafb20f2012-03-19 12:46:39 +11004090 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004091 if (rdev->raid_disk < 0)
4092 continue;
NeilBrownbf2cb0d2011-01-14 09:14:34 +11004093 if (rdev->new_raid_disk >= mddev->raid_disks)
NeilBrowne93f68a2010-06-15 09:36:03 +01004094 rdev->new_raid_disk = -1;
4095 if (rdev->new_raid_disk == rdev->raid_disk)
4096 continue;
Namhyung Kim36fad852011-07-27 11:00:36 +10004097 sysfs_unlink_rdev(mddev, rdev);
NeilBrowne93f68a2010-06-15 09:36:03 +01004098 }
NeilBrowndafb20f2012-03-19 12:46:39 +11004099 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004100 if (rdev->raid_disk < 0)
4101 continue;
4102 if (rdev->new_raid_disk == rdev->raid_disk)
4103 continue;
4104 rdev->raid_disk = rdev->new_raid_disk;
4105 if (rdev->raid_disk < 0)
NeilBrown3a981b02009-08-03 10:59:55 +10004106 clear_bit(In_sync, &rdev->flags);
NeilBrowne93f68a2010-06-15 09:36:03 +01004107 else {
Namhyung Kim36fad852011-07-27 11:00:36 +10004108 if (sysfs_link_rdev(mddev, rdev))
NeilBrown9d487392016-11-02 14:16:49 +11004109 pr_warn("md: cannot register rd%d for %s after level change\n",
4110 rdev->raid_disk, mdname(mddev));
NeilBrown3a981b02009-08-03 10:59:55 +10004111 }
NeilBrowne93f68a2010-06-15 09:36:03 +01004112 }
4113
NeilBrowndb721d32014-12-15 12:56:58 +11004114 if (pers->sync_request == NULL) {
Trela, Maciej9af204c2010-03-08 16:02:44 +11004115 /* this is now an array without redundancy, so
4116 * it must always be in_sync
4117 */
4118 mddev->in_sync = 1;
4119 del_timer_sync(&mddev->safemode_timer);
4120 }
NeilBrown02e5f5c2013-11-14 15:16:15 +11004121 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown245f46c2009-03-31 14:39:39 +11004122 pers->run(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004123 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassow47525e52012-05-22 13:55:29 +10004124 mddev_resume(mddev);
NeilBrown830778a2014-01-14 15:17:03 +11004125 if (!mddev->thread)
4126 md_update_sb(mddev, 1);
Junxiao Bie1a86db2020-07-14 16:10:26 -07004127 sysfs_notify_dirent_safe(mddev->sysfs_level);
Dan Williamsbb7f8d22010-05-01 18:14:57 -07004128 md_new_event(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004129 rv = len;
4130out_unlock:
4131 mddev_unlock(mddev);
NeilBrownd9d166c2006-01-06 00:20:51 -08004132 return rv;
4133}
4134
4135static struct md_sysfs_entry md_level =
NeilBrown80ca3a42006-07-10 04:44:18 -07004136__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
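/*
 * Illustrative usage (assuming the usual sysfs layout): on a running
 * array, writing a new level requests a personality takeover, e.g.
 *	echo raid6 > /sys/block/md0/md/level
 * may convert a RAID5 set to RAID6 provided the target personality
 * implements ->takeover; on an inactive array the write just records
 * the level name to use when the array is started.
 */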
NeilBrowneae17012005-11-08 21:39:23 -08004137
NeilBrownd4dbd022006-06-26 00:27:59 -07004138static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004139layout_show(struct mddev *mddev, char *page)
NeilBrownd4dbd022006-06-26 00:27:59 -07004140{
4141 /* just a number, not meaningful for all levels */
NeilBrown08a02ec2007-05-09 02:35:38 -07004142 if (mddev->reshape_position != MaxSector &&
4143 mddev->layout != mddev->new_layout)
4144 return sprintf(page, "%d (%d)\n",
4145 mddev->new_layout, mddev->layout);
NeilBrownd4dbd022006-06-26 00:27:59 -07004146 return sprintf(page, "%d\n", mddev->layout);
4147}
4148
4149static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004150layout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd4dbd022006-06-26 00:27:59 -07004151{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004152 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004153 int err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004154
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004155 err = kstrtouint(buf, 10, &n);
4156 if (err < 0)
4157 return err;
NeilBrown67918752014-12-15 12:57:01 +11004158 err = mddev_lock(mddev);
4159 if (err)
4160 return err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004161
NeilBrownb3546032009-03-31 14:56:41 +11004162 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004163 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004164 err = -EBUSY;
4165 else if (mddev->ro)
4166 err = -EROFS;
4167 else {
4168 mddev->new_layout = n;
4169 err = mddev->pers->check_reshape(mddev);
4170 if (err)
4171 mddev->new_layout = mddev->layout;
NeilBrown597a7112009-06-18 08:47:42 +10004172 }
NeilBrownb3546032009-03-31 14:56:41 +11004173 } else {
NeilBrown08a02ec2007-05-09 02:35:38 -07004174 mddev->new_layout = n;
NeilBrownb3546032009-03-31 14:56:41 +11004175 if (mddev->reshape_position == MaxSector)
4176 mddev->layout = n;
4177 }
NeilBrown67918752014-12-15 12:57:01 +11004178 mddev_unlock(mddev);
4179 return err ?: len;
NeilBrownd4dbd022006-06-26 00:27:59 -07004180}
4181static struct md_sysfs_entry md_layout =
NeilBrown80ca3a42006-07-10 04:44:18 -07004182__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
NeilBrownd4dbd022006-06-26 00:27:59 -07004183
NeilBrowneae17012005-11-08 21:39:23 -08004184static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004185raid_disks_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08004186{
NeilBrownbb636542005-11-08 21:39:45 -08004187 if (mddev->raid_disks == 0)
4188 return 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004189 if (mddev->reshape_position != MaxSector &&
4190 mddev->delta_disks != 0)
4191 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4192 mddev->raid_disks - mddev->delta_disks);
NeilBrowneae17012005-11-08 21:39:23 -08004193 return sprintf(page, "%d\n", mddev->raid_disks);
4194}
4195
NeilBrownfd01b882011-10-11 16:47:53 +11004196static int update_raid_disks(struct mddev *mddev, int raid_disks);
NeilBrownda943b992006-01-06 00:20:54 -08004197
4198static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004199raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownda943b992006-01-06 00:20:54 -08004200{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004201 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004202 int err;
NeilBrownda943b992006-01-06 00:20:54 -08004203
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004204 err = kstrtouint(buf, 10, &n);
4205 if (err < 0)
4206 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004207
NeilBrown67918752014-12-15 12:57:01 +11004208 err = mddev_lock(mddev);
4209 if (err)
4210 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004211 if (mddev->pers)
NeilBrown67918752014-12-15 12:57:01 +11004212 err = update_raid_disks(mddev, n);
NeilBrown08a02ec2007-05-09 02:35:38 -07004213 else if (mddev->reshape_position != MaxSector) {
NeilBrownc6563a82012-05-21 09:27:00 +10004214 struct md_rdev *rdev;
NeilBrown08a02ec2007-05-09 02:35:38 -07004215 int olddisks = mddev->raid_disks - mddev->delta_disks;
NeilBrownc6563a82012-05-21 09:27:00 +10004216
NeilBrown67918752014-12-15 12:57:01 +11004217 err = -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10004218 rdev_for_each(rdev, mddev) {
4219 if (olddisks < n &&
4220 rdev->data_offset < rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004221 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004222 if (olddisks > n &&
4223 rdev->data_offset > rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004224 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004225 }
NeilBrown67918752014-12-15 12:57:01 +11004226 err = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004227 mddev->delta_disks = n - olddisks;
4228 mddev->raid_disks = n;
NeilBrown2c810cd2012-05-21 09:27:00 +10004229 mddev->reshape_backwards = (mddev->delta_disks < 0);
NeilBrown08a02ec2007-05-09 02:35:38 -07004230 } else
NeilBrownda943b992006-01-06 00:20:54 -08004231 mddev->raid_disks = n;
NeilBrown67918752014-12-15 12:57:01 +11004232out_unlock:
4233 mddev_unlock(mddev);
4234 return err ? err : len;
NeilBrownda943b992006-01-06 00:20:54 -08004235}
4236static struct md_sysfs_entry md_raid_disks =
NeilBrown80ca3a42006-07-10 04:44:18 -07004237__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
NeilBrowneae17012005-11-08 21:39:23 -08004238
NeilBrown24dd4692005-11-08 21:39:26 -08004239static ssize_t
Sebastian Parschauerec164d072020-07-28 12:01:39 +02004240uuid_show(struct mddev *mddev, char *page)
4241{
4242 return sprintf(page, "%pU\n", mddev->uuid);
4243}
4244static struct md_sysfs_entry md_uuid =
4245__ATTR(uuid, S_IRUGO, uuid_show, NULL);
4246
4247static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004248chunk_size_show(struct mddev *mddev, char *page)
NeilBrown3b343802006-01-06 00:20:47 -08004249{
NeilBrown08a02ec2007-05-09 02:35:38 -07004250 if (mddev->reshape_position != MaxSector &&
Andre Noll664e7c42009-06-18 08:45:27 +10004251 mddev->chunk_sectors != mddev->new_chunk_sectors)
4252 return sprintf(page, "%d (%d)\n",
4253 mddev->new_chunk_sectors << 9,
Andre Noll9d8f0362009-06-18 08:45:01 +10004254 mddev->chunk_sectors << 9);
4255 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
NeilBrown3b343802006-01-06 00:20:47 -08004256}
4257
4258static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004259chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown3b343802006-01-06 00:20:47 -08004260{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004261 unsigned long n;
NeilBrown67918752014-12-15 12:57:01 +11004262 int err;
NeilBrown3b343802006-01-06 00:20:47 -08004263
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004264 err = kstrtoul(buf, 10, &n);
4265 if (err < 0)
4266 return err;
NeilBrown3b343802006-01-06 00:20:47 -08004267
NeilBrown67918752014-12-15 12:57:01 +11004268 err = mddev_lock(mddev);
4269 if (err)
4270 return err;
NeilBrownb3546032009-03-31 14:56:41 +11004271 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004272 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004273 err = -EBUSY;
4274 else if (mddev->ro)
4275 err = -EROFS;
4276 else {
4277 mddev->new_chunk_sectors = n >> 9;
4278 err = mddev->pers->check_reshape(mddev);
4279 if (err)
4280 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown597a7112009-06-18 08:47:42 +10004281 }
NeilBrownb3546032009-03-31 14:56:41 +11004282 } else {
Andre Noll664e7c42009-06-18 08:45:27 +10004283 mddev->new_chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004284 if (mddev->reshape_position == MaxSector)
Andre Noll9d8f0362009-06-18 08:45:01 +10004285 mddev->chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004286 }
NeilBrown67918752014-12-15 12:57:01 +11004287 mddev_unlock(mddev);
4288 return err ?: len;
NeilBrown3b343802006-01-06 00:20:47 -08004289}
4290static struct md_sysfs_entry md_chunk_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004291__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
NeilBrown3b343802006-01-06 00:20:47 -08004292
NeilBrowna94213b2006-06-26 00:28:00 -07004293static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004294resync_start_show(struct mddev *mddev, char *page)
NeilBrowna94213b2006-06-26 00:28:00 -07004295{
NeilBrownd1a7c502009-03-31 15:24:32 +11004296 if (mddev->recovery_cp == MaxSector)
4297 return sprintf(page, "none\n");
NeilBrowna94213b2006-06-26 00:28:00 -07004298 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4299}
4300
4301static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004302resync_start_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna94213b2006-06-26 00:28:00 -07004303{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004304 unsigned long long n;
NeilBrown67918752014-12-15 12:57:01 +11004305 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004306
4307 if (cmd_match(buf, "none"))
4308 n = MaxSector;
4309 else {
4310 err = kstrtoull(buf, 10, &n);
4311 if (err < 0)
4312 return err;
4313 if (n != (sector_t)n)
4314 return -EINVAL;
4315 }
NeilBrowna94213b2006-06-26 00:28:00 -07004316
NeilBrown67918752014-12-15 12:57:01 +11004317 err = mddev_lock(mddev);
4318 if (err)
4319 return err;
NeilBrownb0986362011-05-11 15:52:21 +10004320 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrown67918752014-12-15 12:57:01 +11004321 err = -EBUSY;
NeilBrowna94213b2006-06-26 00:28:00 -07004322
NeilBrown67918752014-12-15 12:57:01 +11004323 if (!err) {
4324 mddev->recovery_cp = n;
4325 if (mddev->pers)
Shaohua Li29530792016-12-08 15:48:19 -08004326 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
NeilBrown67918752014-12-15 12:57:01 +11004327 }
4328 mddev_unlock(mddev);
4329 return err ?: len;
NeilBrowna94213b2006-06-26 00:28:00 -07004330}
4331static struct md_sysfs_entry md_resync_start =
NeilBrown750f1992014-09-30 08:53:05 +10004332__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4333 resync_start_show, resync_start_store);
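/*
 * Illustrative usage: while the array is not running (or resync is
 * frozen),
 *	echo none > /sys/block/md0/md/resync_start
 * records that no resync is needed (recovery_cp = MaxSector), and
 * writing a sector number sets the checkpoint from which a later
 * resync will continue.
 */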
NeilBrowna94213b2006-06-26 00:28:00 -07004334
NeilBrown9e653b62006-06-26 00:27:58 -07004335/*
4336 * The array state can be:
4337 *
4338 * clear
4339 * No devices, no size, no level
4340 * Equivalent to STOP_ARRAY ioctl
4341 * inactive
4342 * May have some settings, but array is not active
4343 * all IO results in error
4344 * When written, doesn't tear down array, but just stops it
4345 * suspended (not supported yet)
4346 * All IO requests will block. The array can be reconfigured.
Andre Noll910d8cb2008-03-25 21:00:53 +01004347 * Writing this, if accepted, will block until array is quiescent
NeilBrown9e653b62006-06-26 00:27:58 -07004348 * readonly
4349 * no resync can happen. no superblocks get written.
4350 * write requests fail
4351 * read-auto
4352 * like readonly, but behaves like 'clean' on a write request.
4353 *
4354 * clean - no pending writes, but otherwise active.
4355 * When written to inactive array, starts without resync
4356 * If a write request arrives then
4357 * if metadata is known, mark 'dirty' and switch to 'active'.
4358 * if not known, block and switch to write-pending
4359 * If written to an active array that has pending writes, then fails.
4360 * active
4361 * fully active: IO and resync can be happening.
4362 * When written to inactive array, starts with resync
4363 *
4364 * write-pending
4365 * clean, but writes are blocked waiting for 'active' to be written.
4366 *
4367 * active-idle
4368 * like active, but no writes have been seen for a while (100msec).
4369 *
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004370 * broken
4371 * RAID0/LINEAR-only: same as clean, but array is missing a member.
4372 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
4373 * when a member is gone, so this state will at least alert the
4374 * user that something is wrong.
NeilBrown9e653b62006-06-26 00:27:58 -07004375 */
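/*
 * Illustrative usage (assuming the usual sysfs layout):
 *	cat /sys/block/md0/md/array_state
 *	echo readonly > /sys/block/md0/md/array_state
 * reads the current state and switches a running array to read-only;
 * on a running array, writing "active" or "clean" toggles the dirty /
 * in-sync state without taking the reconfiguration mutex (see
 * array_state_store() below).
 */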
4376enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004377 write_pending, active_idle, broken, bad_word};
Adrian Bunk05381952006-06-26 00:28:01 -07004378static char *array_states[] = {
NeilBrown9e653b62006-06-26 00:27:58 -07004379 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004380 "write-pending", "active-idle", "broken", NULL };
NeilBrown9e653b62006-06-26 00:27:58 -07004381
4382static int match_word(const char *word, char **list)
4383{
4384 int n;
4385 for (n=0; list[n]; n++)
4386 if (cmd_match(word, list[n]))
4387 break;
4388 return n;
4389}
4390
4391static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004392array_state_show(struct mddev *mddev, char *page)
NeilBrown9e653b62006-06-26 00:27:58 -07004393{
4394 enum array_state st = inactive;
4395
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004396 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
NeilBrown9e653b62006-06-26 00:27:58 -07004397 switch(mddev->ro) {
4398 case 1:
4399 st = readonly;
4400 break;
4401 case 2:
4402 st = read_auto;
4403 break;
4404 case 0:
NeilBrown55cc39f2017-03-15 14:05:14 +11004405 spin_lock(&mddev->lock);
Shaohua Li29530792016-12-08 15:48:19 -08004406 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrowne6910632008-02-06 01:39:51 -08004407 st = write_pending;
Tomasz Majchrzak16f88942016-10-24 12:47:28 +02004408 else if (mddev->in_sync)
4409 st = clean;
NeilBrown9e653b62006-06-26 00:27:58 -07004410 else if (mddev->safemode)
4411 st = active_idle;
4412 else
4413 st = active;
NeilBrown55cc39f2017-03-15 14:05:14 +11004414 spin_unlock(&mddev->lock);
NeilBrown9e653b62006-06-26 00:27:58 -07004415 }
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004416
4417 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4418 st = broken;
4419 } else {
NeilBrown9e653b62006-06-26 00:27:58 -07004420 if (list_empty(&mddev->disks) &&
4421 mddev->raid_disks == 0 &&
Andre Noll58c0fed2009-03-31 14:33:13 +11004422 mddev->dev_sectors == 0)
NeilBrown9e653b62006-06-26 00:27:58 -07004423 st = clear;
4424 else
4425 st = inactive;
4426 }
4427 return sprintf(page, "%s\n", array_states[st]);
4428}
4429
NeilBrownf72ffdd2014-09-30 14:23:59 +10004430static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4431static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
NeilBrownfd01b882011-10-11 16:47:53 +11004432static int restart_array(struct mddev *mddev);
NeilBrown9e653b62006-06-26 00:27:58 -07004433
4434static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004435array_state_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown9e653b62006-06-26 00:27:58 -07004436{
NeilBrown6497709b2017-03-15 14:05:14 +11004437 int err = 0;
NeilBrown9e653b62006-06-26 00:27:58 -07004438 enum array_state st = match_word(buf, array_states);
NeilBrown67918752014-12-15 12:57:01 +11004439
4440 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4441 /* don't take reconfig_mutex when toggling between
4442 * clean and active
4443 */
4444 spin_lock(&mddev->lock);
4445 if (st == active) {
4446 restart_array(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004447 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
Tomasz Majchrzak91a6c4a2016-10-25 17:07:08 +02004448 md_wakeup_thread(mddev->thread);
NeilBrown67918752014-12-15 12:57:01 +11004449 wake_up(&mddev->sb_wait);
NeilBrown67918752014-12-15 12:57:01 +11004450 } else /* st == clean */ {
4451 restart_array(mddev);
NeilBrown6497709b2017-03-15 14:05:14 +11004452 if (!set_in_sync(mddev))
NeilBrown67918752014-12-15 12:57:01 +11004453 err = -EBUSY;
4454 }
Tomasz Majchrzak573275b2016-06-30 10:47:09 +02004455 if (!err)
4456 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown67918752014-12-15 12:57:01 +11004457 spin_unlock(&mddev->lock);
NeilBrownc008f1d2015-06-12 19:46:44 +10004458 return err ?: len;
NeilBrown67918752014-12-15 12:57:01 +11004459 }
4460 err = mddev_lock(mddev);
4461 if (err)
4462 return err;
4463 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004464 switch(st) {
4465 case bad_word:
4466 break;
4467 case clear:
4468 /* stopping an active array */
NeilBrowna05b7ea2012-07-19 15:59:18 +10004469 err = do_md_stop(mddev, 0, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004470 break;
4471 case inactive:
4472 /* stopping an active array */
NeilBrown90cf1952012-07-31 10:04:55 +10004473 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004474 err = do_md_stop(mddev, 2, NULL);
NeilBrown90cf1952012-07-31 10:04:55 +10004475 else
NeilBrowne6910632008-02-06 01:39:51 -08004476 err = 0; /* already inactive */
NeilBrown9e653b62006-06-26 00:27:58 -07004477 break;
4478 case suspended:
4479 break; /* not supported yet */
4480 case readonly:
4481 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004482 err = md_set_readonly(mddev, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004483 else {
4484 mddev->ro = 1;
NeilBrown648b6292008-04-30 00:52:30 -07004485 set_disk_ro(mddev->gendisk, 1);
NeilBrown9e653b62006-06-26 00:27:58 -07004486 err = do_md_run(mddev);
4487 }
4488 break;
4489 case read_auto:
NeilBrown9e653b62006-06-26 00:27:58 -07004490 if (mddev->pers) {
NeilBrown80268ee2008-10-13 11:55:12 +11004491 if (mddev->ro == 0)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004492 err = md_set_readonly(mddev, NULL);
NeilBrown80268ee2008-10-13 11:55:12 +11004493 else if (mddev->ro == 1)
NeilBrown648b6292008-04-30 00:52:30 -07004494 err = restart_array(mddev);
4495 if (err == 0) {
4496 mddev->ro = 2;
4497 set_disk_ro(mddev->gendisk, 0);
4498 }
NeilBrown9e653b62006-06-26 00:27:58 -07004499 } else {
4500 mddev->ro = 2;
4501 err = do_md_run(mddev);
4502 }
4503 break;
4504 case clean:
4505 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004506 err = restart_array(mddev);
4507 if (err)
4508 break;
NeilBrown85572d72014-12-15 12:56:56 +11004509 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11004510 if (!set_in_sync(mddev))
NeilBrowne6910632008-02-06 01:39:51 -08004511 err = -EBUSY;
NeilBrown85572d72014-12-15 12:56:56 +11004512 spin_unlock(&mddev->lock);
NeilBrown5bf29592009-05-07 12:50:57 +10004513 } else
4514 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004515 break;
4516 case active:
4517 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004518 err = restart_array(mddev);
4519 if (err)
4520 break;
Shaohua Li29530792016-12-08 15:48:19 -08004521 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown9e653b62006-06-26 00:27:58 -07004522 wake_up(&mddev->sb_wait);
4523 err = 0;
4524 } else {
4525 mddev->ro = 0;
NeilBrown648b6292008-04-30 00:52:30 -07004526 set_disk_ro(mddev->gendisk, 0);
NeilBrown9e653b62006-06-26 00:27:58 -07004527 err = do_md_run(mddev);
4528 }
4529 break;
4530 case write_pending:
4531 case active_idle:
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004532 case broken:
NeilBrown9e653b62006-06-26 00:27:58 -07004533 /* these cannot be set */
4534 break;
4535 }
NeilBrown67918752014-12-15 12:57:01 +11004536
4537 if (!err) {
NeilBrown1d23f172011-12-08 15:49:12 +11004538 if (mddev->hold_active == UNTIL_IOCTL)
4539 mddev->hold_active = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10004540 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10004541 }
NeilBrown67918752014-12-15 12:57:01 +11004542 mddev_unlock(mddev);
4543 return err ?: len;
NeilBrown9e653b62006-06-26 00:27:58 -07004544}
NeilBrown80ca3a42006-07-10 04:44:18 -07004545static struct md_sysfs_entry md_array_state =
NeilBrown750f1992014-09-30 08:53:05 +10004546__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
NeilBrown9e653b62006-06-26 00:27:58 -07004547
NeilBrown6d7ff7382006-01-06 00:21:16 -08004548static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004549max_corrected_read_errors_show(struct mddev *mddev, char *page) {
Robert Becker1e509152009-12-14 12:49:58 +11004550 return sprintf(page, "%d\n",
4551 atomic_read(&mddev->max_corr_read_errors));
4552}
4553
4554static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004555max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
Robert Becker1e509152009-12-14 12:49:58 +11004556{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004557 unsigned int n;
4558 int rv;
Robert Becker1e509152009-12-14 12:49:58 +11004559
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004560 rv = kstrtouint(buf, 10, &n);
4561 if (rv < 0)
4562 return rv;
4563 atomic_set(&mddev->max_corr_read_errors, n);
4564 return len;
Robert Becker1e509152009-12-14 12:49:58 +11004565}
4566
4567static struct md_sysfs_entry max_corr_read_errors =
4568__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4569 max_corrected_read_errors_store);
4570
4571static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004572null_show(struct mddev *mddev, char *page)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004573{
4574 return -EINVAL;
4575}
4576
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004577/* need to ensure rdev_delayed_delete() has completed */
4578static void flush_rdev_wq(struct mddev *mddev)
4579{
4580 struct md_rdev *rdev;
4581
4582 rcu_read_lock();
4583 rdev_for_each_rcu(rdev, mddev)
4584 if (work_pending(&rdev->del_work)) {
4585 flush_workqueue(md_rdev_misc_wq);
4586 break;
4587 }
4588 rcu_read_unlock();
4589}
4590
NeilBrown6d7ff7382006-01-06 00:21:16 -08004591static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004592new_dev_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004593{
4594 /* buf must be %d:%d\n? giving major and minor numbers */
4595 /* The new device is added to the array.
4596 * If the array has a persistent superblock, we read the
4597 * superblock to initialise info and check validity.
4598 * Otherwise, only checking done is that in bind_rdev_to_array,
4599 * which mainly checks size.
4600 */
4601 char *e;
4602 int major = simple_strtoul(buf, &e, 10);
4603 int minor;
4604 dev_t dev;
NeilBrown3cb03002011-10-11 16:45:26 +11004605 struct md_rdev *rdev;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004606 int err;
4607
4608 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4609 return -EINVAL;
4610 minor = simple_strtoul(e+1, &e, 10);
4611 if (*e && *e != '\n')
4612 return -EINVAL;
4613 dev = MKDEV(major, minor);
4614 if (major != MAJOR(dev) ||
4615 minor != MINOR(dev))
4616 return -EOVERFLOW;
4617
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004618 flush_rdev_wq(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004619 err = mddev_lock(mddev);
4620 if (err)
4621 return err;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004622 if (mddev->persistent) {
4623 rdev = md_import_device(dev, mddev->major_version,
4624 mddev->minor_version);
4625 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11004626 struct md_rdev *rdev0
4627 = list_entry(mddev->disks.next,
4628 struct md_rdev, same_set);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004629 err = super_types[mddev->major_version]
4630 .load_super(rdev, rdev0, mddev->minor_version);
4631 if (err < 0)
4632 goto out;
4633 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08004634 } else if (mddev->external)
4635 rdev = md_import_device(dev, -2, -1);
4636 else
NeilBrown6d7ff7382006-01-06 00:21:16 -08004637 rdev = md_import_device(dev, -1, -1);
4638
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004639 if (IS_ERR(rdev)) {
4640 mddev_unlock(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004641 return PTR_ERR(rdev);
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004642 }
NeilBrown6d7ff7382006-01-06 00:21:16 -08004643 err = bind_rdev_to_array(rdev, mddev);
4644 out:
4645 if (err)
4646 export_rdev(rdev);
NeilBrown67918752014-12-15 12:57:01 +11004647 mddev_unlock(mddev);
Alexey Obitotskiy5492c462017-07-28 15:49:25 +02004648 if (!err)
4649 md_new_event(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004650 return err ? err : len;
4651}
4652
4653static struct md_sysfs_entry md_new_device =
NeilBrown80ca3a42006-07-10 04:44:18 -07004654__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
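/*
 * For illustration (array name and device numbers below are hypothetical):
 * new_dev_store() accepts "major:minor" with an optional trailing newline,
 * so a component device can be handed to an array from userspace with
 *
 *     echo 8:16 > /sys/block/md0/md/new_dev
 *
 * after which it is imported via md_import_device() and attached with
 * bind_rdev_to_array().
 */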
NeilBrown3b343802006-01-06 00:20:47 -08004655
4656static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004657bitmap_store(struct mddev *mddev, const char *buf, size_t len)
Paul Clements9b1d1da2006-10-03 01:15:49 -07004658{
4659 char *end;
4660 unsigned long chunk, end_chunk;
NeilBrown67918752014-12-15 12:57:01 +11004661 int err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004662
NeilBrown67918752014-12-15 12:57:01 +11004663 err = mddev_lock(mddev);
4664 if (err)
4665 return err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004666 if (!mddev->bitmap)
4667 goto out;
4668 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4669 while (*buf) {
4670 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4671 if (buf == end) break;
4672 if (*end == '-') { /* range */
4673 buf = end + 1;
4674 end_chunk = simple_strtoul(buf, &end, 0);
4675 if (buf == end) break;
4676 }
4677 if (*end && !isspace(*end)) break;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004678 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
André Goddard Rosae7d28602009-12-14 18:01:06 -08004679 buf = skip_spaces(end);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004680 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004681 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
Paul Clements9b1d1da2006-10-03 01:15:49 -07004682out:
NeilBrown67918752014-12-15 12:57:01 +11004683 mddev_unlock(mddev);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004684 return len;
4685}
4686
4687static struct md_sysfs_entry md_bitmap =
4688__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
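/*
 * For illustration (hypothetical array and chunk numbers): bitmap_store()
 * takes whitespace-separated chunk numbers or <chunk>-<chunk> ranges, so
 *
 *     echo "0-15 64" > /sys/block/md0/md/bitmap_set_bits
 *
 * dirties chunks 0..15 and chunk 64 in the in-memory bitmap and flushes
 * them out via md_bitmap_unplug().  It only has effect while the array
 * actually has a bitmap.
 */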
4689
4690static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004691size_show(struct mddev *mddev, char *page)
NeilBrowna35b0d62006-01-06 00:20:49 -08004692{
Andre Noll58c0fed2009-03-31 14:33:13 +11004693 return sprintf(page, "%llu\n",
4694 (unsigned long long)mddev->dev_sectors / 2);
NeilBrowna35b0d62006-01-06 00:20:49 -08004695}
4696
NeilBrownfd01b882011-10-11 16:47:53 +11004697static int update_size(struct mddev *mddev, sector_t num_sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004698
4699static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004700size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna35b0d62006-01-06 00:20:49 -08004701{
4702 /* If array is inactive, we can reduce the component size, but
4703 * not increase it (except from 0).
4704 * If array is active, we can try an on-line resize
4705 */
Dan Williamsb522adc2009-03-31 15:00:31 +11004706 sector_t sectors;
4707 int err = strict_blocks_to_sectors(buf, &sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004708
Andre Noll58c0fed2009-03-31 14:33:13 +11004709 if (err < 0)
4710 return err;
NeilBrown67918752014-12-15 12:57:01 +11004711 err = mddev_lock(mddev);
4712 if (err)
4713 return err;
NeilBrowna35b0d62006-01-06 00:20:49 -08004714 if (mddev->pers) {
Andre Noll58c0fed2009-03-31 14:33:13 +11004715 err = update_size(mddev, sectors);
Xiao Ni4ba1e782016-06-12 17:18:00 +08004716 if (err == 0)
4717 md_update_sb(mddev, 1);
NeilBrowna35b0d62006-01-06 00:20:49 -08004718 } else {
Andre Noll58c0fed2009-03-31 14:33:13 +11004719 if (mddev->dev_sectors == 0 ||
4720 mddev->dev_sectors > sectors)
4721 mddev->dev_sectors = sectors;
NeilBrowna35b0d62006-01-06 00:20:49 -08004722 else
4723 err = -ENOSPC;
4724 }
NeilBrown67918752014-12-15 12:57:01 +11004725 mddev_unlock(mddev);
NeilBrowna35b0d62006-01-06 00:20:49 -08004726 return err ? err : len;
4727}
4728
4729static struct md_sysfs_entry md_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004730__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
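/*
 * For illustration (hypothetical array and size): component_size is shown
 * and stored in KiB (dev_sectors / 2 above), so
 *
 *     echo 1048576 > /sys/block/md0/md/component_size
 *
 * requests 1 GiB per component device; on a running array this goes
 * through update_size() as an on-line resize.
 */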
NeilBrowna35b0d62006-01-06 00:20:49 -08004731
Masanari Iida83f0d772012-10-30 00:18:08 +09004732/* Metadata version.
NeilBrowne6910632008-02-06 01:39:51 -08004733 * This is one of
4734 * 'none' for arrays with no metadata (good luck...)
4735 * 'external' for arrays with externally managed metadata,
NeilBrown8bb93aa2006-01-06 00:20:50 -08004736 * or N.M for internally known formats
4737 */
4738static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004739metadata_show(struct mddev *mddev, char *page)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004740{
4741 if (mddev->persistent)
4742 return sprintf(page, "%d.%d\n",
4743 mddev->major_version, mddev->minor_version);
NeilBrowne6910632008-02-06 01:39:51 -08004744 else if (mddev->external)
4745 return sprintf(page, "external:%s\n", mddev->metadata_type);
NeilBrown8bb93aa2006-01-06 00:20:50 -08004746 else
4747 return sprintf(page, "none\n");
4748}
4749
4750static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004751metadata_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004752{
4753 int major, minor;
4754 char *e;
NeilBrown67918752014-12-15 12:57:01 +11004755 int err;
NeilBrownea43ddd2008-10-13 11:55:11 +11004756 /* Changing the details of 'external' metadata is
4757 * always permitted. Otherwise there must be
4758 * no devices attached to the array.
4759 */
NeilBrown67918752014-12-15 12:57:01 +11004760
4761 err = mddev_lock(mddev);
4762 if (err)
4763 return err;
4764 err = -EBUSY;
NeilBrownea43ddd2008-10-13 11:55:11 +11004765 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4766 ;
4767 else if (!list_empty(&mddev->disks))
NeilBrown67918752014-12-15 12:57:01 +11004768 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004769
NeilBrown67918752014-12-15 12:57:01 +11004770 err = 0;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004771 if (cmd_match(buf, "none")) {
4772 mddev->persistent = 0;
NeilBrowne6910632008-02-06 01:39:51 -08004773 mddev->external = 0;
4774 mddev->major_version = 0;
4775 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004776 goto out_unlock;
NeilBrowne6910632008-02-06 01:39:51 -08004777 }
4778 if (strncmp(buf, "external:", 9) == 0) {
NeilBrown20a49ff2008-02-06 01:39:57 -08004779 size_t namelen = len-9;
NeilBrowne6910632008-02-06 01:39:51 -08004780 if (namelen >= sizeof(mddev->metadata_type))
4781 namelen = sizeof(mddev->metadata_type)-1;
4782 strncpy(mddev->metadata_type, buf+9, namelen);
4783 mddev->metadata_type[namelen] = 0;
4784 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4785 mddev->metadata_type[--namelen] = 0;
4786 mddev->persistent = 0;
4787 mddev->external = 1;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004788 mddev->major_version = 0;
4789 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004790 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004791 }
4792 major = simple_strtoul(buf, &e, 10);
NeilBrown67918752014-12-15 12:57:01 +11004793 err = -EINVAL;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004794 if (e==buf || *e != '.')
NeilBrown67918752014-12-15 12:57:01 +11004795 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004796 buf = e+1;
4797 minor = simple_strtoul(buf, &e, 10);
NeilBrown3f9d7b02006-12-22 01:11:41 -08004798 if (e==buf || (*e && *e != '\n') )
NeilBrown67918752014-12-15 12:57:01 +11004799 goto out_unlock;
4800 err = -ENOENT;
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07004801 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004802 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004803 mddev->major_version = major;
4804 mddev->minor_version = minor;
4805 mddev->persistent = 1;
NeilBrowne6910632008-02-06 01:39:51 -08004806 mddev->external = 0;
NeilBrown67918752014-12-15 12:57:01 +11004807 err = 0;
4808out_unlock:
4809 mddev_unlock(mddev);
4810 return err ?: len;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004811}
4812
4813static struct md_sysfs_entry md_metadata =
NeilBrown750f1992014-09-30 08:53:05 +10004814__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
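/*
 * For illustration (hypothetical array): metadata_store() accepts "none",
 * "external:<type>", or a "major.minor" pair naming a built-in superblock
 * format, e.g.
 *
 *     echo 1.2 > /sys/block/md0/md/metadata_version
 *
 * Except for the "external:" case, this is only allowed while the array
 * has no member devices.
 */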
NeilBrown8bb93aa2006-01-06 00:20:50 -08004815
NeilBrowna35b0d62006-01-06 00:20:49 -08004816static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004817action_show(struct mddev *mddev, char *page)
NeilBrown24dd4692005-11-08 21:39:26 -08004818{
NeilBrown7eec3142005-11-08 21:39:44 -08004819 char *type = "idle";
NeilBrownb7b17c92014-12-15 12:56:59 +11004820 unsigned long recovery = mddev->recovery;
4821 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
NeilBrownb6a9ce62009-05-26 09:41:17 +10004822 type = "frozen";
NeilBrownb7b17c92014-12-15 12:56:59 +11004823 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4824 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4825 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
NeilBrownccfcc3c2006-03-27 01:18:09 -08004826 type = "reshape";
NeilBrownb7b17c92014-12-15 12:56:59 +11004827 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4828 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004829 type = "resync";
NeilBrownb7b17c92014-12-15 12:56:59 +11004830 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004831 type = "check";
4832 else
4833 type = "repair";
NeilBrownb7b17c92014-12-15 12:56:59 +11004834 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004835 type = "recover";
NeilBrown985ca972015-07-06 12:26:57 +10004836 else if (mddev->reshape_position != MaxSector)
4837 type = "reshape";
NeilBrown24dd4692005-11-08 21:39:26 -08004838 }
4839 return sprintf(page, "%s\n", type);
4840}
4841
4842static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004843action_store(struct mddev *mddev, const char *page, size_t len)
NeilBrown24dd4692005-11-08 21:39:26 -08004844{
NeilBrown7eec3142005-11-08 21:39:44 -08004845 if (!mddev->pers || !mddev->pers->sync_request)
4846 return -EINVAL;
4847
NeilBrownb6a9ce62009-05-26 09:41:17 +10004848
4849 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004850 if (cmd_match(page, "frozen"))
4851 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4852 else
4853 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown8e8e2512015-06-12 19:51:27 +10004854 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4855 mddev_lock(mddev) == 0) {
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004856 if (work_pending(&mddev->del_work))
4857 flush_workqueue(md_misc_wq);
NeilBrown8e8e2512015-06-12 19:51:27 +10004858 if (mddev->sync_thread) {
4859 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown67918752014-12-15 12:57:01 +11004860 md_reap_sync_thread(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004861 }
NeilBrown8e8e2512015-06-12 19:51:27 +10004862 mddev_unlock(mddev);
NeilBrown7eec3142005-11-08 21:39:44 -08004863 }
NeilBrown312045e2015-12-21 11:01:21 +11004864 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004865 return -EBUSY;
Neil Brown72a23c22008-06-28 08:31:41 +10004866 else if (cmd_match(page, "resync"))
NeilBrown56ccc112015-05-28 17:53:29 +10004867 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004868 else if (cmd_match(page, "recover")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004869 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004870 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004871 } else if (cmd_match(page, "reshape")) {
NeilBrown16484bf2006-03-27 01:18:13 -08004872 int err;
4873 if (mddev->pers->start_reshape == NULL)
4874 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11004875 err = mddev_lock(mddev);
4876 if (!err) {
NeilBrown312045e2015-12-21 11:01:21 +11004877 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4878 err = -EBUSY;
4879 else {
4880 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4881 err = mddev->pers->start_reshape(mddev);
4882 }
NeilBrown67918752014-12-15 12:57:01 +11004883 mddev_unlock(mddev);
4884 }
NeilBrown16484bf2006-03-27 01:18:13 -08004885 if (err)
4886 return err;
Junxiao Bie1a86db2020-07-14 16:10:26 -07004887 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrown16484bf2006-03-27 01:18:13 -08004888 } else {
NeilBrownbce74da2006-01-06 00:20:41 -08004889 if (cmd_match(page, "check"))
NeilBrown7eec3142005-11-08 21:39:44 -08004890 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
NeilBrown2adc7d42006-05-20 14:59:57 -07004891 else if (!cmd_match(page, "repair"))
NeilBrown7eec3142005-11-08 21:39:44 -08004892 return -EINVAL;
NeilBrown56ccc112015-05-28 17:53:29 +10004893 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004894 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4895 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004896 }
NeilBrown48c26dd2012-10-11 14:19:39 +11004897 if (mddev->ro == 2) {
4898 /* A write to sync_action is enough to justify
4899 * canceling read-auto mode
4900 */
4901 mddev->ro = 0;
4902 md_wakeup_thread(mddev->sync_thread);
4903 }
NeilBrown03c902e2006-01-06 00:20:46 -08004904 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08004905 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10004906 sysfs_notify_dirent_safe(mddev->sysfs_action);
NeilBrown24dd4692005-11-08 21:39:26 -08004907 return len;
4908}
4909
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004910static struct md_sysfs_entry md_scan_mode =
NeilBrown750f1992014-09-30 08:53:05 +10004911__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
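/*
 * For illustration (hypothetical array): action_store() recognises "idle",
 * "frozen", "resync", "recover", "reshape", "check" and "repair", so a
 * scrub can be started and later cancelled with
 *
 *     echo check > /sys/block/md0/md/sync_action
 *     echo idle  > /sys/block/md0/md/sync_action
 */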
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004912
4913static ssize_t
4914last_sync_action_show(struct mddev *mddev, char *page)
4915{
4916 return sprintf(page, "%s\n", mddev->last_sync_action);
4917}
4918
4919static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4920
NeilBrown9d888832005-11-08 21:39:26 -08004921static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004922mismatch_cnt_show(struct mddev *mddev, char *page)
NeilBrown9d888832005-11-08 21:39:26 -08004923{
4924 return sprintf(page, "%llu\n",
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11004925 (unsigned long long)
4926 atomic64_read(&mddev->resync_mismatches));
NeilBrown9d888832005-11-08 21:39:26 -08004927}
4928
NeilBrown80ca3a42006-07-10 04:44:18 -07004929static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
NeilBrown9d888832005-11-08 21:39:26 -08004930
NeilBrown88202a02006-01-06 00:21:36 -08004931static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004932sync_min_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004933{
4934 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4935 mddev->sync_speed_min ? "local": "system");
4936}
4937
4938static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004939sync_min_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004940{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004941 unsigned int min;
4942 int rv;
4943
NeilBrown88202a02006-01-06 00:21:36 -08004944 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004945 min = 0;
4946 } else {
4947 rv = kstrtouint(buf, 10, &min);
4948 if (rv < 0)
4949 return rv;
4950 if (min == 0)
4951 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004952 }
NeilBrown88202a02006-01-06 00:21:36 -08004953 mddev->sync_speed_min = min;
4954 return len;
4955}
4956
4957static struct md_sysfs_entry md_sync_min =
4958__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4959
4960static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004961sync_max_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004962{
4963 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4964 mddev->sync_speed_max ? "local": "system");
4965}
4966
4967static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004968sync_max_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004969{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004970 unsigned int max;
4971 int rv;
4972
NeilBrown88202a02006-01-06 00:21:36 -08004973 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004974 max = 0;
4975 } else {
4976 rv = kstrtouint(buf, 10, &max);
4977 if (rv < 0)
4978 return rv;
4979 if (max == 0)
4980 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004981 }
NeilBrown88202a02006-01-06 00:21:36 -08004982 mddev->sync_speed_max = max;
4983 return len;
4984}
4985
4986static struct md_sysfs_entry md_sync_max =
4987__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
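/*
 * For illustration (hypothetical array and rate): sync_speed_min/max accept
 * either the literal "system" (use the global limits) or a non-zero
 * per-array rate in KiB/sec, e.g.
 *
 *     echo 50000  > /sys/block/md0/md/sync_speed_max
 *     echo system > /sys/block/md0/md/sync_speed_min
 */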
4988
Iustin Popd7f3d292007-10-16 23:30:54 -07004989static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004990degraded_show(struct mddev *mddev, char *page)
Iustin Popd7f3d292007-10-16 23:30:54 -07004991{
4992 return sprintf(page, "%d\n", mddev->degraded);
4993}
4994static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
NeilBrown88202a02006-01-06 00:21:36 -08004995
4996static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004997sync_force_parallel_show(struct mddev *mddev, char *page)
Bernd Schubert90b08712008-05-23 13:04:38 -07004998{
4999 return sprintf(page, "%d\n", mddev->parallel_resync);
5000}
5001
5002static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005003sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
Bernd Schubert90b08712008-05-23 13:04:38 -07005004{
5005 long n;
5006
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005007 if (kstrtol(buf, 10, &n))
Bernd Schubert90b08712008-05-23 13:04:38 -07005008 return -EINVAL;
5009
5010 if (n != 0 && n != 1)
5011 return -EINVAL;
5012
5013 mddev->parallel_resync = n;
5014
5015 if (mddev->sync_thread)
5016 wake_up(&resync_wait);
5017
5018 return len;
5019}
5020
5021/* force parallel resync, even with shared block devices */
5022static struct md_sysfs_entry md_sync_force_parallel =
5023__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5024 sync_force_parallel_show, sync_force_parallel_store);
5025
5026static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005027sync_speed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005028{
5029 unsigned long resync, dt, db;
NeilBrownd1a7c502009-03-31 15:24:32 +11005030 if (mddev->curr_resync == 0)
5031 return sprintf(page, "none\n");
Andre Noll9687a602008-03-25 22:24:09 +01005032 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5033 dt = (jiffies - mddev->resync_mark) / HZ;
NeilBrown88202a02006-01-06 00:21:36 -08005034 if (!dt) dt++;
Andre Noll9687a602008-03-25 22:24:09 +01005035 db = resync - mddev->resync_mark_cnt;
5036 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
NeilBrown88202a02006-01-06 00:21:36 -08005037}
5038
NeilBrown80ca3a42006-07-10 04:44:18 -07005039static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
NeilBrown88202a02006-01-06 00:21:36 -08005040
5041static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005042sync_completed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005043{
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005044 unsigned long long max_sectors, resync;
NeilBrown88202a02006-01-06 00:21:36 -08005045
NeilBrownacb180b2009-04-14 16:28:34 +10005046 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5047 return sprintf(page, "none\n");
5048
NeilBrown72f36d52012-10-11 14:25:57 +11005049 if (mddev->curr_resync == 1 ||
5050 mddev->curr_resync == 2)
5051 return sprintf(page, "delayed\n");
5052
NeilBrownc804cde2012-05-21 09:28:33 +10005053 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5054 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Andre Noll58c0fed2009-03-31 14:33:13 +11005055 max_sectors = mddev->resync_max_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005056 else
Andre Noll58c0fed2009-03-31 14:33:13 +11005057 max_sectors = mddev->dev_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005058
NeilBrownacb180b2009-04-14 16:28:34 +10005059 resync = mddev->curr_resync_completed;
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005060 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
NeilBrown88202a02006-01-06 00:21:36 -08005061}
5062
NeilBrown750f1992014-09-30 08:53:05 +10005063static struct md_sysfs_entry md_sync_completed =
5064 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
NeilBrown88202a02006-01-06 00:21:36 -08005065
NeilBrowne464eaf2006-03-27 01:18:14 -08005066static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005067min_sync_show(struct mddev *mddev, char *page)
Neil Brown5e96ee62008-06-28 08:31:24 +10005068{
5069 return sprintf(page, "%llu\n",
5070 (unsigned long long)mddev->resync_min);
5071}
5072static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005073min_sync_store(struct mddev *mddev, const char *buf, size_t len)
Neil Brown5e96ee62008-06-28 08:31:24 +10005074{
5075 unsigned long long min;
NeilBrown23da4222014-12-15 12:57:01 +11005076 int err;
NeilBrown23da4222014-12-15 12:57:01 +11005077
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005078 if (kstrtoull(buf, 10, &min))
Neil Brown5e96ee62008-06-28 08:31:24 +10005079 return -EINVAL;
NeilBrown23da4222014-12-15 12:57:01 +11005080
5081 spin_lock(&mddev->lock);
5082 err = -EINVAL;
Neil Brown5e96ee62008-06-28 08:31:24 +10005083 if (min > mddev->resync_max)
NeilBrown23da4222014-12-15 12:57:01 +11005084 goto out_unlock;
5085
5086 err = -EBUSY;
Neil Brown5e96ee62008-06-28 08:31:24 +10005087 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005088 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005089
NeilBrown50c37b12015-03-23 17:36:38 +11005090 /* Round down to multiple of 4K for safety */
5091 mddev->resync_min = round_down(min, 8);
NeilBrown23da4222014-12-15 12:57:01 +11005092 err = 0;
Neil Brown5e96ee62008-06-28 08:31:24 +10005093
NeilBrown23da4222014-12-15 12:57:01 +11005094out_unlock:
5095 spin_unlock(&mddev->lock);
5096 return err ?: len;
Neil Brown5e96ee62008-06-28 08:31:24 +10005097}
5098
5099static struct md_sysfs_entry md_min_sync =
5100__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5101
5102static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005103max_sync_show(struct mddev *mddev, char *page)
NeilBrownc6207272008-02-06 01:39:52 -08005104{
5105 if (mddev->resync_max == MaxSector)
5106 return sprintf(page, "max\n");
5107 else
5108 return sprintf(page, "%llu\n",
5109 (unsigned long long)mddev->resync_max);
5110}
5111static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005112max_sync_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownc6207272008-02-06 01:39:52 -08005113{
NeilBrown23da4222014-12-15 12:57:01 +11005114 int err;
5115 spin_lock(&mddev->lock);
NeilBrownc6207272008-02-06 01:39:52 -08005116 if (strncmp(buf, "max", 3) == 0)
5117 mddev->resync_max = MaxSector;
5118 else {
Neil Brown5e96ee62008-06-28 08:31:24 +10005119 unsigned long long max;
NeilBrown23da4222014-12-15 12:57:01 +11005120 int chunk;
5121
5122 err = -EINVAL;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005123 if (kstrtoull(buf, 10, &max))
NeilBrown23da4222014-12-15 12:57:01 +11005124 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005125 if (max < mddev->resync_min)
NeilBrown23da4222014-12-15 12:57:01 +11005126 goto out_unlock;
5127
5128 err = -EBUSY;
NeilBrownc6207272008-02-06 01:39:52 -08005129 if (max < mddev->resync_max &&
NeilBrown4d484a42009-08-13 10:41:50 +10005130 mddev->ro == 0 &&
NeilBrownc6207272008-02-06 01:39:52 -08005131 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005132 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005133
5134 /* Must be a multiple of chunk_size */
NeilBrown23da4222014-12-15 12:57:01 +11005135 chunk = mddev->chunk_sectors;
5136 if (chunk) {
raz ben yehuda2ac06c32009-06-16 17:01:42 +10005137 sector_t temp = max;
NeilBrown23da4222014-12-15 12:57:01 +11005138
5139 err = -EINVAL;
5140 if (sector_div(temp, chunk))
5141 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005142 }
5143 mddev->resync_max = max;
5144 }
5145 wake_up(&mddev->recovery_wait);
NeilBrown23da4222014-12-15 12:57:01 +11005146 err = 0;
5147out_unlock:
5148 spin_unlock(&mddev->lock);
5149 return err ?: len;
NeilBrownc6207272008-02-06 01:39:52 -08005150}
5151
5152static struct md_sysfs_entry md_max_sync =
5153__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
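/*
 * For illustration (hypothetical array and offsets): sync_min and sync_max
 * bound the resync window in 512-byte sectors; sync_max also accepts the
 * literal "max".  A bounded check of the first 10 GiB could be set up with
 *
 *     echo 0        > /sys/block/md0/md/sync_min
 *     echo 20971520 > /sys/block/md0/md/sync_max
 *
 * subject to the rounding/multiple-of-chunk checks above.
 */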
5154
5155static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005156suspend_lo_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005157{
5158 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5159}
5160
5161static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005162suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005163{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005164 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005165 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005166
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005167 err = kstrtoull(buf, 10, &new);
5168 if (err < 0)
5169 return err;
5170 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005171 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005172
NeilBrown67918752014-12-15 12:57:01 +11005173 err = mddev_lock(mddev);
5174 if (err)
5175 return err;
5176 err = -EINVAL;
5177 if (mddev->pers == NULL ||
5178 mddev->pers->quiesce == NULL)
5179 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005180 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005181 mddev->suspend_lo = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005182 mddev_resume(mddev);
5183
NeilBrown67918752014-12-15 12:57:01 +11005184 err = 0;
5185unlock:
5186 mddev_unlock(mddev);
5187 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005188}
5189static struct md_sysfs_entry md_suspend_lo =
5190__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5191
NeilBrowne464eaf2006-03-27 01:18:14 -08005192static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005193suspend_hi_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005194{
5195 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5196}
5197
5198static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005199suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005200{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005201 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005202 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005203
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005204 err = kstrtoull(buf, 10, &new);
5205 if (err < 0)
5206 return err;
5207 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005208 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005209
NeilBrown67918752014-12-15 12:57:01 +11005210 err = mddev_lock(mddev);
5211 if (err)
5212 return err;
5213 err = -EINVAL;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005214 if (mddev->pers == NULL)
NeilBrown67918752014-12-15 12:57:01 +11005215 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005216
5217 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005218 mddev->suspend_hi = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005219 mddev_resume(mddev);
5220
NeilBrown67918752014-12-15 12:57:01 +11005221 err = 0;
5222unlock:
5223 mddev_unlock(mddev);
5224 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005225}
5226static struct md_sysfs_entry md_suspend_hi =
5227__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5228
NeilBrown08a02ec2007-05-09 02:35:38 -07005229static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005230reshape_position_show(struct mddev *mddev, char *page)
NeilBrown08a02ec2007-05-09 02:35:38 -07005231{
5232 if (mddev->reshape_position != MaxSector)
5233 return sprintf(page, "%llu\n",
5234 (unsigned long long)mddev->reshape_position);
5235 strcpy(page, "none\n");
5236 return 5;
5237}
5238
5239static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005240reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown08a02ec2007-05-09 02:35:38 -07005241{
NeilBrownc6563a82012-05-21 09:27:00 +10005242 struct md_rdev *rdev;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005243 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005244 int err;
NeilBrown67918752014-12-15 12:57:01 +11005245
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005246 err = kstrtoull(buf, 10, &new);
5247 if (err < 0)
5248 return err;
5249 if (new != (sector_t)new)
NeilBrown08a02ec2007-05-09 02:35:38 -07005250 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11005251 err = mddev_lock(mddev);
5252 if (err)
5253 return err;
5254 err = -EBUSY;
5255 if (mddev->pers)
5256 goto unlock;
NeilBrown08a02ec2007-05-09 02:35:38 -07005257 mddev->reshape_position = new;
5258 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10005259 mddev->reshape_backwards = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07005260 mddev->new_level = mddev->level;
5261 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10005262 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10005263 rdev_for_each(rdev, mddev)
5264 rdev->new_data_offset = rdev->data_offset;
NeilBrown67918752014-12-15 12:57:01 +11005265 err = 0;
5266unlock:
5267 mddev_unlock(mddev);
5268 return err ?: len;
NeilBrown08a02ec2007-05-09 02:35:38 -07005269}
5270
5271static struct md_sysfs_entry md_reshape_position =
5272__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5273 reshape_position_store);
5274
Dan Williamsb522adc2009-03-31 15:00:31 +11005275static ssize_t
NeilBrown2c810cd2012-05-21 09:27:00 +10005276reshape_direction_show(struct mddev *mddev, char *page)
5277{
5278 return sprintf(page, "%s\n",
5279 mddev->reshape_backwards ? "backwards" : "forwards");
5280}
5281
5282static ssize_t
5283reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5284{
5285 int backwards = 0;
NeilBrown67918752014-12-15 12:57:01 +11005286 int err;
5287
NeilBrown2c810cd2012-05-21 09:27:00 +10005288 if (cmd_match(buf, "forwards"))
5289 backwards = 0;
5290 else if (cmd_match(buf, "backwards"))
5291 backwards = 1;
5292 else
5293 return -EINVAL;
5294 if (mddev->reshape_backwards == backwards)
5295 return len;
5296
NeilBrown67918752014-12-15 12:57:01 +11005297 err = mddev_lock(mddev);
5298 if (err)
5299 return err;
NeilBrown2c810cd2012-05-21 09:27:00 +10005300 /* check if we are allowed to change */
5301 if (mddev->delta_disks)
NeilBrown67918752014-12-15 12:57:01 +11005302 err = -EBUSY;
5303 else if (mddev->persistent &&
NeilBrown2c810cd2012-05-21 09:27:00 +10005304 mddev->major_version == 0)
NeilBrown67918752014-12-15 12:57:01 +11005305 err = -EINVAL;
5306 else
5307 mddev->reshape_backwards = backwards;
5308 mddev_unlock(mddev);
5309 return err ?: len;
NeilBrown2c810cd2012-05-21 09:27:00 +10005310}
5311
5312static struct md_sysfs_entry md_reshape_direction =
5313__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5314 reshape_direction_store);
5315
5316static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005317array_size_show(struct mddev *mddev, char *page)
Dan Williamsb522adc2009-03-31 15:00:31 +11005318{
5319 if (mddev->external_size)
5320 return sprintf(page, "%llu\n",
5321 (unsigned long long)mddev->array_sectors/2);
5322 else
5323 return sprintf(page, "default\n");
5324}
5325
5326static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005327array_size_store(struct mddev *mddev, const char *buf, size_t len)
Dan Williamsb522adc2009-03-31 15:00:31 +11005328{
5329 sector_t sectors;
NeilBrown67918752014-12-15 12:57:01 +11005330 int err;
5331
5332 err = mddev_lock(mddev);
5333 if (err)
5334 return err;
Dan Williamsb522adc2009-03-31 15:00:31 +11005335
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005336 /* cluster raid doesn't support changing array_sectors */
Zhilong Liub6708832017-04-10 14:15:55 +08005337 if (mddev_is_clustered(mddev)) {
5338 mddev_unlock(mddev);
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005339 return -EINVAL;
Zhilong Liub6708832017-04-10 14:15:55 +08005340 }
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005341
Dan Williamsb522adc2009-03-31 15:00:31 +11005342 if (strncmp(buf, "default", 7) == 0) {
5343 if (mddev->pers)
5344 sectors = mddev->pers->size(mddev, 0, 0);
5345 else
5346 sectors = mddev->array_sectors;
5347
5348 mddev->external_size = 0;
5349 } else {
5350 if (strict_blocks_to_sectors(buf, &sectors) < 0)
NeilBrown67918752014-12-15 12:57:01 +11005351 err = -EINVAL;
5352 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5353 err = -E2BIG;
5354 else
5355 mddev->external_size = 1;
Dan Williamsb522adc2009-03-31 15:00:31 +11005356 }
5357
NeilBrown67918752014-12-15 12:57:01 +11005358 if (!err) {
5359 mddev->array_sectors = sectors;
Christoph Hellwig2c247c52020-11-16 15:57:11 +01005360 if (mddev->pers)
5361 set_capacity_and_notify(mddev->gendisk,
5362 mddev->array_sectors);
NeilBrowncbe6ef12011-02-16 13:58:38 +11005363 }
NeilBrown67918752014-12-15 12:57:01 +11005364 mddev_unlock(mddev);
5365 return err ?: len;
Dan Williamsb522adc2009-03-31 15:00:31 +11005366}
5367
5368static struct md_sysfs_entry md_array_size =
5369__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5370 array_size_store);
NeilBrowne464eaf2006-03-27 01:18:14 -08005371
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005372static ssize_t
5373consistency_policy_show(struct mddev *mddev, char *page)
5374{
5375 int ret;
5376
5377 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5378 ret = sprintf(page, "journal\n");
5379 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5380 ret = sprintf(page, "ppl\n");
5381 } else if (mddev->bitmap) {
5382 ret = sprintf(page, "bitmap\n");
5383 } else if (mddev->pers) {
5384 if (mddev->pers->sync_request)
5385 ret = sprintf(page, "resync\n");
5386 else
5387 ret = sprintf(page, "none\n");
5388 } else {
5389 ret = sprintf(page, "unknown\n");
5390 }
5391
5392 return ret;
5393}
5394
5395static ssize_t
5396consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5397{
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005398 int err = 0;
5399
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005400 if (mddev->pers) {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005401 if (mddev->pers->change_consistency_policy)
5402 err = mddev->pers->change_consistency_policy(mddev, buf);
5403 else
5404 err = -EBUSY;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005405 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5406 set_bit(MD_HAS_PPL, &mddev->flags);
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005407 } else {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005408 err = -EINVAL;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005409 }
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005410
5411 return err ? err : len;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005412}
5413
5414static struct md_sysfs_entry md_consistency_policy =
5415__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5416 consistency_policy_store);
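/*
 * For illustration (hypothetical array): on a running array the store
 * method delegates to the personality's change_consistency_policy(); for an
 * inactive array with externally managed metadata,
 *
 *     echo ppl > /sys/block/md0/md/consistency_policy
 *
 * simply sets the MD_HAS_PPL flag.
 */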
5417
Guoqing Jiang9a567842019-07-24 11:09:19 +02005418static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5419{
5420 return sprintf(page, "%d\n", mddev->fail_last_dev);
5421}
5422
5423/*
 5424 * Setting fail_last_dev to true allows the last device to be forcibly removed
 5425 * from RAID1/RAID10.
5426 */
5427static ssize_t
5428fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5429{
5430 int ret;
5431 bool value;
5432
5433 ret = kstrtobool(buf, &value);
5434 if (ret)
5435 return ret;
5436
5437 if (value != mddev->fail_last_dev)
5438 mddev->fail_last_dev = value;
5439
5440 return len;
5441}
5442static struct md_sysfs_entry md_fail_last_dev =
5443__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5444 fail_last_dev_store);
5445
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005446static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5447{
5448 if (mddev->pers == NULL || (mddev->pers->level != 1))
5449 return sprintf(page, "n/a\n");
5450 else
5451 return sprintf(page, "%d\n", mddev->serialize_policy);
5452}
5453
5454/*
 5455 * Setting serialize_policy to true enforces that write IO is not reordered
 5456 * for raid1.
5457 */
5458static ssize_t
5459serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5460{
5461 int err;
5462 bool value;
5463
5464 err = kstrtobool(buf, &value);
5465 if (err)
5466 return err;
5467
5468 if (value == mddev->serialize_policy)
5469 return len;
5470
5471 err = mddev_lock(mddev);
5472 if (err)
5473 return err;
5474 if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5475 pr_err("md: serialize_policy is only effective for raid1\n");
5476 err = -EINVAL;
5477 goto unlock;
5478 }
5479
5480 mddev_suspend(mddev);
5481 if (value)
5482 mddev_create_serial_pool(mddev, NULL, true);
5483 else
5484 mddev_destroy_serial_pool(mddev, NULL, true);
5485 mddev->serialize_policy = value;
5486 mddev_resume(mddev);
5487unlock:
5488 mddev_unlock(mddev);
5489 return err ?: len;
5490}
5491
5492static struct md_sysfs_entry md_serialize_policy =
5493__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5494 serialize_policy_store);
5495
5496
NeilBrowneae17012005-11-08 21:39:23 -08005497static struct attribute *md_default_attrs[] = {
5498 &md_level.attr,
NeilBrownd4dbd022006-06-26 00:27:59 -07005499 &md_layout.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005500 &md_raid_disks.attr,
Sebastian Parschauerec164d072020-07-28 12:01:39 +02005501 &md_uuid.attr,
NeilBrown3b343802006-01-06 00:20:47 -08005502 &md_chunk_size.attr,
NeilBrowna35b0d62006-01-06 00:20:49 -08005503 &md_size.attr,
NeilBrowna94213b2006-06-26 00:28:00 -07005504 &md_resync_start.attr,
NeilBrown8bb93aa2006-01-06 00:20:50 -08005505 &md_metadata.attr,
NeilBrown6d7ff7382006-01-06 00:21:16 -08005506 &md_new_device.attr,
NeilBrown16f17b32006-06-26 00:27:37 -07005507 &md_safe_delay.attr,
NeilBrown9e653b62006-06-26 00:27:58 -07005508 &md_array_state.attr,
NeilBrown08a02ec2007-05-09 02:35:38 -07005509 &md_reshape_position.attr,
NeilBrown2c810cd2012-05-21 09:27:00 +10005510 &md_reshape_direction.attr,
Dan Williamsb522adc2009-03-31 15:00:31 +11005511 &md_array_size.attr,
Robert Becker1e509152009-12-14 12:49:58 +11005512 &max_corr_read_errors.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005513 &md_consistency_policy.attr,
Guoqing Jiang9a567842019-07-24 11:09:19 +02005514 &md_fail_last_dev.attr,
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005515 &md_serialize_policy.attr,
NeilBrown411036f2005-11-08 21:39:40 -08005516 NULL,
5517};
5518
5519static struct attribute *md_redundancy_attrs[] = {
NeilBrown24dd4692005-11-08 21:39:26 -08005520 &md_scan_mode.attr,
Jonathan Brassowc4a39552013-06-25 01:23:59 -05005521 &md_last_scan_mode.attr,
NeilBrown9d888832005-11-08 21:39:26 -08005522 &md_mismatches.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005523 &md_sync_min.attr,
5524 &md_sync_max.attr,
5525 &md_sync_speed.attr,
Bernd Schubert90b08712008-05-23 13:04:38 -07005526 &md_sync_force_parallel.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005527 &md_sync_completed.attr,
Neil Brown5e96ee62008-06-28 08:31:24 +10005528 &md_min_sync.attr,
NeilBrownc6207272008-02-06 01:39:52 -08005529 &md_max_sync.attr,
NeilBrowne464eaf2006-03-27 01:18:14 -08005530 &md_suspend_lo.attr,
5531 &md_suspend_hi.attr,
Paul Clements9b1d1da2006-10-03 01:15:49 -07005532 &md_bitmap.attr,
Iustin Popd7f3d292007-10-16 23:30:54 -07005533 &md_degraded.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005534 NULL,
5535};
NeilBrown411036f2005-11-08 21:39:40 -08005536static struct attribute_group md_redundancy_group = {
5537 .name = NULL,
5538 .attrs = md_redundancy_attrs,
5539};
5540
NeilBrowneae17012005-11-08 21:39:23 -08005541static ssize_t
5542md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5543{
5544 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005545 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005546 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005547
5548 if (!entry->show)
5549 return -EIO;
NeilBrownaf8a2432011-12-08 15:49:46 +11005550 spin_lock(&all_mddevs_lock);
5551 if (list_empty(&mddev->all_mddevs)) {
5552 spin_unlock(&all_mddevs_lock);
5553 return -EBUSY;
5554 }
5555 mddev_get(mddev);
5556 spin_unlock(&all_mddevs_lock);
5557
NeilBrownb7b17c92014-12-15 12:56:59 +11005558 rv = entry->show(mddev, page);
NeilBrownaf8a2432011-12-08 15:49:46 +11005559 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005560 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005561}
5562
5563static ssize_t
5564md_attr_store(struct kobject *kobj, struct attribute *attr,
5565 const char *page, size_t length)
5566{
5567 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005568 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005569 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005570
5571 if (!entry->store)
5572 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07005573 if (!capable(CAP_SYS_ADMIN))
5574 return -EACCES;
NeilBrownaf8a2432011-12-08 15:49:46 +11005575 spin_lock(&all_mddevs_lock);
5576 if (list_empty(&mddev->all_mddevs)) {
5577 spin_unlock(&all_mddevs_lock);
5578 return -EBUSY;
5579 }
5580 mddev_get(mddev);
5581 spin_unlock(&all_mddevs_lock);
NeilBrown67918752014-12-15 12:57:01 +11005582 rv = entry->store(mddev, page, length);
NeilBrownaf8a2432011-12-08 15:49:46 +11005583 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005584 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005585}
5586
5587static void md_free(struct kobject *ko)
5588{
NeilBrownfd01b882011-10-11 16:47:53 +11005589 struct mddev *mddev = container_of(ko, struct mddev, kobj);
NeilBrowna21d1502009-01-09 08:31:09 +11005590
5591 if (mddev->sysfs_state)
5592 sysfs_put(mddev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07005593 if (mddev->sysfs_level)
5594 sysfs_put(mddev->sysfs_level);
5595
Bart Van Assched8115c352018-02-28 10:15:29 -08005596 if (mddev->gendisk)
5597 del_gendisk(mddev->gendisk);
NeilBrown6cd18e72015-04-27 14:12:22 +10005598 if (mddev->queue)
5599 blk_cleanup_queue(mddev->queue);
Bart Van Assched8115c352018-02-28 10:15:29 -08005600 if (mddev->gendisk)
NeilBrowna21d1502009-01-09 08:31:09 +11005601 put_disk(mddev->gendisk);
NeilBrown4ad23a972017-03-15 14:05:14 +11005602 percpu_ref_exit(&mddev->writes_pending);
NeilBrowna21d1502009-01-09 08:31:09 +11005603
Kent Overstreet28dec872018-06-07 20:52:54 -04005604 bioset_exit(&mddev->bio_set);
5605 bioset_exit(&mddev->sync_set);
Artur Paszkiewicz41d2d842020-07-03 11:13:09 +02005606 mempool_exit(&mddev->md_io_pool);
NeilBrowneae17012005-11-08 21:39:23 -08005607 kfree(mddev);
5608}
5609
Emese Revfy52cf25d2010-01-19 02:58:23 +01005610static const struct sysfs_ops md_sysfs_ops = {
NeilBrowneae17012005-11-08 21:39:23 -08005611 .show = md_attr_show,
5612 .store = md_attr_store,
5613};
5614static struct kobj_type md_ktype = {
5615 .release = md_free,
5616 .sysfs_ops = &md_sysfs_ops,
5617 .default_attrs = md_default_attrs,
5618};
5619
Linus Torvalds1da177e2005-04-16 15:20:36 -07005620int mdp_major = 0;
5621
Dan Williams5fd3a172009-03-04 00:57:25 -07005622static void mddev_delayed_delete(struct work_struct *ws)
5623{
NeilBrownfd01b882011-10-11 16:47:53 +11005624 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Dan Williams5fd3a172009-03-04 00:57:25 -07005625
NeilBrown43a70502009-12-14 12:49:55 +11005626 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
Dan Williams5fd3a172009-03-04 00:57:25 -07005627 kobject_del(&mddev->kobj);
5628 kobject_put(&mddev->kobj);
5629}
5630
NeilBrown4ad23a972017-03-15 14:05:14 +11005631static void no_op(struct percpu_ref *r) {}
5632
NeilBrowna415c0f2017-06-05 16:05:13 +10005633int mddev_init_writes_pending(struct mddev *mddev)
5634{
5635 if (mddev->writes_pending.percpu_count_ptr)
5636 return 0;
Roman Gushchinddde2af2019-05-07 10:01:49 -07005637 if (percpu_ref_init(&mddev->writes_pending, no_op,
5638 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
NeilBrowna415c0f2017-06-05 16:05:13 +10005639 return -ENOMEM;
5640 /* We want to start with the refcount at zero */
5641 percpu_ref_put(&mddev->writes_pending);
5642 return 0;
5643}
5644EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5645
NeilBrownefeb53c2009-01-09 08:31:10 +11005646static int md_alloc(dev_t dev, char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647{
NeilBrown039b7222017-04-12 16:26:13 +10005648 /*
5649 * If dev is zero, name is the name of a device to allocate with
5650 * an arbitrary minor number. It will be "md_???"
5651 * If dev is non-zero it must be a device number with a MAJOR of
5652 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5653 * the device is being created by opening a node in /dev.
5654 * If "name" is not NULL, the device is being created by
5655 * writing to /sys/module/md_mod/parameters/new_array.
5656 */
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005657 static DEFINE_MUTEX(disks_mutex);
NeilBrownfd01b882011-10-11 16:47:53 +11005658 struct mddev *mddev = mddev_find(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659 struct gendisk *disk;
NeilBrownefeb53c2009-01-09 08:31:10 +11005660 int partitioned;
5661 int shift;
5662 int unit;
Greg Kroah-Hartman3830c622007-12-17 15:54:39 -04005663 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005664
5665 if (!mddev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005666 return -ENODEV;
5667
5668 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5669 shift = partitioned ? MdpMinorShift : 0;
5670 unit = MINOR(mddev->unit) >> shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671
Tejun Heoe804ac72010-10-15 15:36:08 +02005672 /* wait for any previous instance of this device to be
5673 * completely removed (mddev_delayed_delete).
NeilBrownd3374822009-01-09 08:31:10 +11005674 */
Tejun Heoe804ac72010-10-15 15:36:08 +02005675 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11005676
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005677 mutex_lock(&disks_mutex);
NeilBrown0909dc42009-07-01 12:27:21 +10005678 error = -EEXIST;
5679 if (mddev->gendisk)
5680 goto abort;
NeilBrownefeb53c2009-01-09 08:31:10 +11005681
NeilBrown039b7222017-04-12 16:26:13 +10005682 if (name && !dev) {
NeilBrownefeb53c2009-01-09 08:31:10 +11005683 /* Need to ensure that 'name' is not a duplicate.
5684 */
NeilBrownfd01b882011-10-11 16:47:53 +11005685 struct mddev *mddev2;
NeilBrownefeb53c2009-01-09 08:31:10 +11005686 spin_lock(&all_mddevs_lock);
5687
5688 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5689 if (mddev2->gendisk &&
5690 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5691 spin_unlock(&all_mddevs_lock);
NeilBrown0909dc42009-07-01 12:27:21 +10005692 goto abort;
NeilBrownefeb53c2009-01-09 08:31:10 +11005693 }
5694 spin_unlock(&all_mddevs_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005695 }
NeilBrown039b7222017-04-12 16:26:13 +10005696 if (name && dev)
5697 /*
5698 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
5699 */
5700 mddev->hold_active = UNTIL_STOP;
NeilBrown8b765392009-01-09 08:31:08 +11005701
Artur Paszkiewicz41d2d842020-07-03 11:13:09 +02005702 error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
5703 sizeof(struct md_io));
5704 if (error)
5705 goto abort;
5706
NeilBrown0909dc42009-07-01 12:27:21 +10005707 error = -ENOMEM;
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02005708 mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
NeilBrown0909dc42009-07-01 12:27:21 +10005709 if (!mddev->queue)
5710 goto abort;
NeilBrown409c57f2009-03-31 14:39:39 +11005711
Martin K. Petersenb1bd0552012-01-11 16:27:11 +01005712 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown8b765392009-01-09 08:31:08 +11005713
Linus Torvalds1da177e2005-04-16 15:20:36 -07005714 disk = alloc_disk(1 << shift);
5715 if (!disk) {
NeilBrown8b765392009-01-09 08:31:08 +11005716 blk_cleanup_queue(mddev->queue);
5717 mddev->queue = NULL;
NeilBrown0909dc42009-07-01 12:27:21 +10005718 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719 }
NeilBrownefeb53c2009-01-09 08:31:10 +11005720 disk->major = MAJOR(mddev->unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721 disk->first_minor = unit << shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005722 if (name)
5723 strcpy(disk->disk_name, name);
5724 else if (partitioned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005725 sprintf(disk->disk_name, "md_d%d", unit);
Greg Kroah-Hartmance7b0f462005-06-20 21:15:16 -07005726 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005727 sprintf(disk->disk_name, "md%d", unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005728 disk->fops = &md_fops;
5729 disk->private_data = mddev;
5730 disk->queue = mddev->queue;
Jens Axboe56883a72016-03-30 10:16:53 -06005731 blk_queue_write_cache(mddev->queue, true, true);
NeilBrown92850bb2008-10-21 13:25:32 +11005732 /* Allow extended partitions. This makes the
NeilBrownd3374822009-01-09 08:31:10 +11005733 * 'mdp' device redundant, but we can't really
NeilBrown92850bb2008-10-21 13:25:32 +11005734 * remove it now.
5735 */
5736 disk->flags |= GENHD_FL_EXT_DEVT;
Christoph Hellwiga564e232020-07-08 14:25:41 +02005737 disk->events |= DISK_EVENT_MEDIA_CHANGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005738 mddev->gendisk = disk;
NeilBrownb0140892011-05-10 17:49:01 +10005739 /* As soon as we call add_disk(), another thread could get
5740 * through to md_open, so make sure it doesn't get too far
5741 */
5742 mutex_lock(&mddev->open_mutex);
5743 add_disk(disk);
5744
Kent Overstreet28dec872018-06-07 20:52:54 -04005745 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
NeilBrown0909dc42009-07-01 12:27:21 +10005746 if (error) {
5747 /* This isn't possible, but as kobject_init_and_add is marked
5748 * __must_check, we must do something with the result
5749 */
NeilBrown9d487392016-11-02 14:16:49 +11005750 pr_debug("md: cannot register %s/md - name in use\n",
5751 disk->disk_name);
NeilBrown0909dc42009-07-01 12:27:21 +10005752 error = 0;
5753 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005754 if (mddev->kobj.sd &&
5755 sysfs_create_group(&mddev->kobj, &md_bitmap_group))
NeilBrown9d487392016-11-02 14:16:49 +11005756 pr_debug("pointless warning\n");
NeilBrownb0140892011-05-10 17:49:01 +10005757 mutex_unlock(&mddev->open_mutex);
NeilBrown0909dc42009-07-01 12:27:21 +10005758 abort:
5759 mutex_unlock(&disks_mutex);
NeilBrown00bcb4a2010-06-01 19:37:23 +10005760 if (!error && mddev->kobj.sd) {
Greg Kroah-Hartman3830c622007-12-17 15:54:39 -04005761 kobject_uevent(&mddev->kobj, KOBJ_ADD);
NeilBrown00bcb4a2010-06-01 19:37:23 +10005762 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07005763 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
NeilBrownb62b7592008-10-21 13:25:21 +11005764 }
NeilBrownd3374822009-01-09 08:31:10 +11005765 mddev_put(mddev);
NeilBrown0909dc42009-07-01 12:27:21 +10005766 return error;
NeilBrownefeb53c2009-01-09 08:31:10 +11005767}
5768
Christoph Hellwig28144f92020-10-29 15:58:34 +01005769static void md_probe(dev_t dev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005770{
Christoph Hellwig28144f92020-10-29 15:58:34 +01005771 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5772 return;
NeilBrown78b63502017-04-12 16:26:13 +10005773 if (create_on_open)
5774 md_alloc(dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005775}
5776
Kees Cooke4dca7b2017-10-17 19:04:42 -07005777static int add_named_array(const char *val, const struct kernel_param *kp)
NeilBrownefeb53c2009-01-09 08:31:10 +11005778{
NeilBrown039b7222017-04-12 16:26:13 +10005779 /*
5780 * val must be "md_*" or "mdNNN".
5781 * For "md_*" we allocate an array with a large free minor number, and
NeilBrownefeb53c2009-01-09 08:31:10 +11005782 * set the name to val. val must not already be an active name.
NeilBrown039b7222017-04-12 16:26:13 +10005783 * For "mdNNN" we allocate an array with the minor number NNN
5784 * which must not already be in use.
NeilBrownefeb53c2009-01-09 08:31:10 +11005785 */
5786 int len = strlen(val);
5787 char buf[DISK_NAME_LEN];
NeilBrown039b7222017-04-12 16:26:13 +10005788 unsigned long devnum;
NeilBrownefeb53c2009-01-09 08:31:10 +11005789
5790 while (len && val[len-1] == '\n')
5791 len--;
5792 if (len >= DISK_NAME_LEN)
5793 return -E2BIG;
5794 strlcpy(buf, val, len+1);
NeilBrown039b7222017-04-12 16:26:13 +10005795 if (strncmp(buf, "md_", 3) == 0)
5796 return md_alloc(0, buf);
5797 if (strncmp(buf, "md", 2) == 0 &&
5798 isdigit(buf[2]) &&
5799 kstrtoul(buf+2, 10, &devnum) == 0 &&
5800 devnum <= MINORMASK)
5801 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5802
5803 return -EINVAL;
NeilBrownefeb53c2009-01-09 08:31:10 +11005804}
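/*
 * For illustration (hypothetical names): add_named_array() backs
 * /sys/module/md_mod/parameters/new_array, so an array can be pre-created
 * with either a fixed name or a fixed minor number:
 *
 *     echo md_home > /sys/module/md_mod/parameters/new_array
 *     echo md127   > /sys/module/md_mod/parameters/new_array
 */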
5805
Kees Cook8376d3c2017-10-16 17:01:48 -07005806static void md_safemode_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005807{
Kees Cook8376d3c2017-10-16 17:01:48 -07005808 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005809
NeilBrown4ad23a972017-03-15 14:05:14 +11005810 mddev->safemode = 1;
5811 if (mddev->external)
5812 sysfs_notify_dirent_safe(mddev->sysfs_state);
5813
Linus Torvalds1da177e2005-04-16 15:20:36 -07005814 md_wakeup_thread(mddev->thread);
5815}
5816
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005817static int start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005818
NeilBrownfd01b882011-10-11 16:47:53 +11005819int md_run(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005820{
NeilBrown2604b702006-01-06 00:20:36 -08005821 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11005822 struct md_rdev *rdev;
NeilBrown84fc4b52011-10-11 16:49:58 +11005823 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005824
NeilBrowna757e642005-04-16 15:26:42 -07005825 if (list_empty(&mddev->disks))
 5826 /* cannot run an array with no devices. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005827 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005828
5829 if (mddev->pers)
5830 return -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10005831 /* Cannot run until previous stop completes properly */
5832 if (mddev->sysfs_active)
5833 return -EBUSY;
NeilBrownb6eb1272010-04-15 10:13:47 +10005834
Linus Torvalds1da177e2005-04-16 15:20:36 -07005835 /*
5836 * Analyze all RAID superblock(s)
5837 */
NeilBrown1ec4a932008-02-06 01:39:53 -08005838 if (!mddev->raid_disks) {
5839 if (!mddev->persistent)
5840 return -EINVAL;
Yufen Yu6a5cb532019-10-16 16:00:03 +08005841 err = analyze_sbs(mddev);
5842 if (err)
5843 return -EINVAL;
NeilBrown1ec4a932008-02-06 01:39:53 -08005844 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005845
NeilBrownd9d166c2006-01-06 00:20:51 -08005846 if (mddev->level != LEVEL_NONE)
5847 request_module("md-level-%d", mddev->level);
5848 else if (mddev->clevel[0])
5849 request_module("md-%s", mddev->clevel);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005850
5851 /*
5852 * Drop all container device buffers, from now on
5853 * the only valid external interface is through the md
5854 * device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005855 */
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005856 mddev->has_superblocks = false;
NeilBrowndafb20f2012-03-19 12:46:39 +11005857 rdev_for_each(rdev, mddev) {
NeilBrownb2d444d2005-11-08 21:39:31 -08005858 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005859 continue;
5860 sync_blockdev(rdev->bdev);
Peter Zijlstraf98393a2007-05-06 14:49:54 -07005861 invalidate_bdev(rdev->bdev);
NeilBrown97b20ef2017-04-13 08:53:48 +10005862 if (mddev->ro != 1 &&
5863 (bdev_read_only(rdev->bdev) ||
5864 bdev_read_only(rdev->meta_bdev))) {
5865 mddev->ro = 1;
5866 if (mddev->gendisk)
5867 set_disk_ro(mddev->gendisk, 1);
5868 }
NeilBrownf0d76d72007-07-17 04:06:12 -07005869
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005870 if (rdev->sb_page)
5871 mddev->has_superblocks = true;
5872
NeilBrownf0d76d72007-07-17 04:06:12 -07005873 /* perform some consistency tests on the device.
 5874 * We don't want the data to overlap the metadata.
Andre Noll58c0fed2009-03-31 14:33:13 +11005875 * Internal Bitmap issues have been handled elsewhere.
NeilBrownf0d76d72007-07-17 04:06:12 -07005876 */
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +11005877 if (rdev->meta_bdev) {
5878 /* Nothing to check */;
5879 } else if (rdev->data_offset < rdev->sb_start) {
Andre Noll58c0fed2009-03-31 14:33:13 +11005880 if (mddev->dev_sectors &&
5881 rdev->data_offset + mddev->dev_sectors
Andre Noll0f420352008-07-11 22:02:23 +10005882 > rdev->sb_start) {
NeilBrown9d487392016-11-02 14:16:49 +11005883 pr_warn("md: %s: data overlaps metadata\n",
5884 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005885 return -EINVAL;
5886 }
5887 } else {
Andre Noll0f420352008-07-11 22:02:23 +10005888 if (rdev->sb_start + rdev->sb_size/512
NeilBrownf0d76d72007-07-17 04:06:12 -07005889 > rdev->data_offset) {
NeilBrown9d487392016-11-02 14:16:49 +11005890 pr_warn("md: %s: metadata overlaps data\n",
5891 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005892 return -EINVAL;
5893 }
5894 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005895 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896 }
5897
Kent Overstreetafeee512018-05-20 18:25:52 -04005898 if (!bioset_initialized(&mddev->bio_set)) {
5899 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5900 if (err)
5901 return err;
Ming Lei10273172017-02-14 23:29:00 +08005902 }
Kent Overstreetafeee512018-05-20 18:25:52 -04005903 if (!bioset_initialized(&mddev->sync_set)) {
5904 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5905 if (err)
Kent Overstreet28dec872018-06-07 20:52:54 -04005906 return err;
NeilBrown5a850712017-06-21 09:12:21 +10005907 }
NeilBrowna167f662010-10-26 18:31:13 +11005908
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909 spin_lock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005910 pers = find_pers(mddev->level, mddev->clevel);
NeilBrown2604b702006-01-06 00:20:36 -08005911 if (!pers || !try_module_get(pers->owner)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005912 spin_unlock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005913 if (mddev->level != LEVEL_NONE)
NeilBrown9d487392016-11-02 14:16:49 +11005914 pr_warn("md: personality for level %d is not loaded!\n",
5915 mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08005916 else
NeilBrown9d487392016-11-02 14:16:49 +11005917 pr_warn("md: personality for level %s is not loaded!\n",
5918 mddev->clevel);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005919 err = -EINVAL;
5920 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005921 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005922 spin_unlock(&pers_lock);
NeilBrown34817e82009-03-31 14:39:38 +11005923 if (mddev->level != pers->level) {
5924 mddev->level = pers->level;
5925 mddev->new_level = pers->level;
5926 }
NeilBrownd9d166c2006-01-06 00:20:51 -08005927 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005928
NeilBrownf6705572006-03-27 01:18:11 -08005929 if (mddev->reshape_position != MaxSector &&
NeilBrown63c70c42006-03-27 01:18:13 -08005930 pers->start_reshape == NULL) {
NeilBrownf6705572006-03-27 01:18:11 -08005931 /* This personality cannot handle reshaping... */
NeilBrownf6705572006-03-27 01:18:11 -08005932 module_put(pers->owner);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005933 err = -EINVAL;
5934 goto abort;
NeilBrownf6705572006-03-27 01:18:11 -08005935 }
5936
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005937 if (pers->sync_request) {
5938 /* Warn if this is a potentially silly
5939 * configuration.
5940 */
5941 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11005942 struct md_rdev *rdev2;
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005943 int warned = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005944
NeilBrowndafb20f2012-03-19 12:46:39 +11005945 rdev_for_each(rdev, mddev)
5946 rdev_for_each(rdev2, mddev) {
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005947 if (rdev < rdev2 &&
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02005948 rdev->bdev->bd_disk ==
5949 rdev2->bdev->bd_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11005950 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5951 mdname(mddev),
5952 bdevname(rdev->bdev,b),
5953 bdevname(rdev2->bdev,b2));
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005954 warned = 1;
5955 }
5956 }
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005957
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005958 if (warned)
NeilBrown9d487392016-11-02 14:16:49 +11005959 pr_warn("True protection against single-disk failure might be compromised.\n");
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005960 }
5961
NeilBrown657390d2005-08-26 18:34:16 -07005962 mddev->recovery = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11005963	/* may be overridden by personality */
5964 mddev->resync_max_sectors = mddev->dev_sectors;
5965
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005966 mddev->ok_start_degraded = start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005967
NeilBrown0f9552b52009-12-30 12:08:50 +11005968 if (start_readonly && mddev->ro == 0)
NeilBrownf91de922005-11-08 21:39:36 -08005969 mddev->ro = 2; /* read-only, but switch on first write */
5970
NeilBrown36d091f2014-12-15 12:56:58 +11005971 err = pers->run(mddev);
Andre Noll13e53df2008-03-26 00:07:03 +01005972 if (err)
NeilBrown9d487392016-11-02 14:16:49 +11005973 pr_warn("md: pers->run() failed ...\n");
NeilBrown36d091f2014-12-15 12:56:58 +11005974 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
NeilBrown9d487392016-11-02 14:16:49 +11005975 WARN_ONCE(!mddev->external_size,
5976 "%s: default size too small, but 'external_size' not in effect?\n",
5977 __func__);
5978 pr_warn("md: invalid array_size %llu > default size %llu\n",
5979 (unsigned long long)mddev->array_sectors / 2,
5980 (unsigned long long)pers->size(mddev, 0, 0) / 2);
Dan Williamsb522adc2009-03-31 15:00:31 +11005981 err = -EINVAL;
Dan Williamsb522adc2009-03-31 15:00:31 +11005982 }
NeilBrown36d091f2014-12-15 12:56:58 +11005983 if (err == 0 && pers->sync_request &&
NeilBrownef99bf42012-05-22 13:55:08 +10005984 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005985 struct bitmap *bitmap;
5986
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07005987 bitmap = md_bitmap_create(mddev, -1);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005988 if (IS_ERR(bitmap)) {
5989 err = PTR_ERR(bitmap);
NeilBrown9d487392016-11-02 14:16:49 +11005990 pr_warn("%s: failed to create bitmap (%d)\n",
5991 mdname(mddev), err);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005992 } else
5993 mddev->bitmap = bitmap;
5994
NeilBrownb15c2e52006-01-06 00:20:16 -08005995 }
Guoqing Jiangd4945492019-06-14 17:10:39 +08005996 if (err)
5997 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005998
5999 if (mddev->bitmap_info.max_write_behind > 0) {
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006000 bool create_pool = false;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006001
6002 rdev_for_each(rdev, mddev) {
6003 if (test_bit(WriteMostly, &rdev->flags) &&
Guoqing Jiang404659c2019-12-23 10:48:53 +01006004 rdev_init_serial(rdev))
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006005 create_pool = true;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006006 }
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006007 if (create_pool && mddev->serial_info_pool == NULL) {
Guoqing Jiang404659c2019-12-23 10:48:53 +01006008 mddev->serial_info_pool =
6009 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6010 sizeof(struct serial_info));
6011 if (!mddev->serial_info_pool) {
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006012 err = -ENOMEM;
Guoqing Jiangd4945492019-06-14 17:10:39 +08006013 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006014 }
6015 }
6016 }
6017
NeilBrown5c675f82014-12-15 12:56:56 +11006018 if (mddev->queue) {
Shaohua Libb086a82016-09-30 09:45:40 -07006019 bool nonrot = true;
6020
6021 rdev_for_each(rdev, mddev) {
6022 if (rdev->raid_disk >= 0 &&
6023 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6024 nonrot = false;
6025 break;
6026 }
6027 }
6028 if (mddev->degraded)
6029 nonrot = false;
6030 if (nonrot)
Bart Van Assche8b904b52018-03-07 17:10:10 -08006031 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
Shaohua Libb086a82016-09-30 09:45:40 -07006032 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08006033 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
NeilBrown5c675f82014-12-15 12:56:56 +11006034 }
NeilBrown36d091f2014-12-15 12:56:58 +11006035 if (pers->sync_request) {
NeilBrown00bcb4a2010-06-01 19:37:23 +10006036 if (mddev->kobj.sd &&
6037 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11006038 pr_warn("md: cannot register extra attributes for %s\n",
6039 mdname(mddev));
NeilBrown00bcb4a2010-06-01 19:37:23 +10006040 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07006041 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6042 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrown5e55e2f2007-03-26 21:32:14 -08006043 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
NeilBrownfd9d49c2005-11-08 21:39:42 -08006044 mddev->ro = 0;
6045
Robert Becker1e509152009-12-14 12:49:58 +11006046 atomic_set(&mddev->max_corr_read_errors,
6047 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006048 mddev->safemode = 0;
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006049 if (mddev_is_clustered(mddev))
6050 mddev->safemode_delay = 0;
6051 else
Zhao Heming7c9d5c52020-07-21 02:08:52 +08006052 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006053 mddev->in_sync = 1;
NeilBrown0ca69882011-01-14 09:14:33 +11006054 smp_wmb();
NeilBrown36d091f2014-12-15 12:56:58 +11006055 spin_lock(&mddev->lock);
6056 mddev->pers = pers;
NeilBrown36d091f2014-12-15 12:56:58 +11006057 spin_unlock(&mddev->lock);
NeilBrowndafb20f2012-03-19 12:46:39 +11006058 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006059 if (rdev->raid_disk >= 0)
Yufen Yue5b521e2019-06-14 15:41:07 -07006060 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006061
NeilBrowna4a3d262015-07-17 11:57:30 +10006062 if (mddev->degraded && !mddev->ro)
6063 /* This ensures that recovering status is reported immediately
6064 * via sysfs - until a lack of spares is confirmed.
6065 */
6066 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006067 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf72ffdd2014-09-30 14:23:59 +10006068
Shaohua Li29530792016-12-08 15:48:19 -08006069 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07006070 md_update_sb(mddev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006071
NeilBrownd7603b72006-01-06 00:20:30 -08006072 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006073 return 0;
Xiao Nib1261942018-01-24 12:17:38 +08006074
Guoqing Jiangd4945492019-06-14 17:10:39 +08006075bitmap_abort:
6076 mddev_detach(mddev);
6077 if (mddev->private)
6078 pers->free(mddev, mddev->private);
6079 mddev->private = NULL;
6080 module_put(pers->owner);
6081 md_bitmap_destroy(mddev);
Xiao Nib1261942018-01-24 12:17:38 +08006082abort:
NeilBrown4bc034d2019-03-29 10:46:16 -07006083 bioset_exit(&mddev->bio_set);
6084 bioset_exit(&mddev->sync_set);
Xiao Nib1261942018-01-24 12:17:38 +08006085 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006086}
NeilBrown390ee602010-06-01 19:37:27 +10006087EXPORT_SYMBOL_GPL(md_run);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006088
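/*
 * do_md_run() wraps md_run() for the ioctl/sysfs/autorun start-up
 * paths: it additionally loads the bitmap contents, runs any deferred
 * start-up work via md_start(), publishes the array capacity and sends
 * a KOBJ_CHANGE uevent once the array is ready.
 */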
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006089int do_md_run(struct mddev *mddev)
NeilBrownfe60b012010-03-29 11:10:42 +11006090{
6091 int err;
6092
NeilBrown9d4b45d2019-08-20 10:21:09 +10006093 set_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006094 err = md_run(mddev);
6095 if (err)
6096 goto out;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006097 err = md_bitmap_load(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006098 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006099 md_bitmap_destroy(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006100 goto out;
6101 }
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006102
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006103 if (mddev_is_clustered(mddev))
6104 md_allow_write(mddev);
6105
Song Liud5d885f2017-11-19 22:17:01 -08006106 /* run start up tasks that require md_thread */
6107 md_start(mddev);
6108
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006109 md_wakeup_thread(mddev->thread);
6110 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6111
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006112 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006113 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006114 mddev->changed = 1;
NeilBrownfe60b012010-03-29 11:10:42 +11006115 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006116 sysfs_notify_dirent_safe(mddev->sysfs_state);
6117 sysfs_notify_dirent_safe(mddev->sysfs_action);
Junxiao Bie1a86db2020-07-14 16:10:26 -07006118 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownfe60b012010-03-29 11:10:42 +11006119out:
NeilBrown9d4b45d2019-08-20 10:21:09 +10006120 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006121 return err;
6122}
6123
Song Liud5d885f2017-11-19 22:17:01 -08006124int md_start(struct mddev *mddev)
6125{
6126 int ret = 0;
6127
6128 if (mddev->pers->start) {
6129 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6130 md_wakeup_thread(mddev->thread);
6131 ret = mddev->pers->start(mddev);
6132 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6133 md_wakeup_thread(mddev->sync_thread);
6134 }
6135 return ret;
6136}
6137EXPORT_SYMBOL_GPL(md_start);
6138
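/*
 * restart_array() switches a read-only array back to read-write.  It
 * refuses if the array is not running, is not currently read-only,
 * needs a journal device that is missing or faulty, or if any member
 * device is itself read-only.
 */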
NeilBrownfd01b882011-10-11 16:47:53 +11006139static int restart_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006140{
6141 struct gendisk *disk = mddev->gendisk;
NeilBrown97b20ef2017-04-13 08:53:48 +10006142 struct md_rdev *rdev;
6143 bool has_journal = false;
6144 bool has_readonly = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006145
Andre Noll80fab1d2008-07-11 22:02:21 +10006146 /* Complain if it has no devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006147 if (list_empty(&mddev->disks))
Andre Noll80fab1d2008-07-11 22:02:21 +10006148 return -ENXIO;
6149 if (!mddev->pers)
6150 return -EINVAL;
6151 if (!mddev->ro)
6152 return -EBUSY;
Song Liu339421d2015-10-08 21:54:13 -07006153
NeilBrown97b20ef2017-04-13 08:53:48 +10006154 rcu_read_lock();
6155 rdev_for_each_rcu(rdev, mddev) {
6156 if (test_bit(Journal, &rdev->flags) &&
6157 !test_bit(Faulty, &rdev->flags))
6158 has_journal = true;
6159 if (bdev_read_only(rdev->bdev))
6160 has_readonly = true;
Song Liu339421d2015-10-08 21:54:13 -07006161 }
NeilBrown97b20ef2017-04-13 08:53:48 +10006162 rcu_read_unlock();
6163 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6164 /* Don't restart rw with journal missing/faulty */
6165 return -EINVAL;
6166 if (has_readonly)
6167 return -EROFS;
Song Liu339421d2015-10-08 21:54:13 -07006168
Andre Noll80fab1d2008-07-11 22:02:21 +10006169 mddev->safemode = 0;
6170 mddev->ro = 0;
6171 set_disk_ro(disk, 0);
NeilBrown9d487392016-11-02 14:16:49 +11006172 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
Andre Noll80fab1d2008-07-11 22:02:21 +10006173 /* Kick recovery or resync if necessary */
6174 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6175 md_wakeup_thread(mddev->thread);
6176 md_wakeup_thread(mddev->sync_thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006177 sysfs_notify_dirent_safe(mddev->sysfs_state);
Andre Noll80fab1d2008-07-11 22:02:21 +10006178 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006179}
6180
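/*
 * md_clean() resets an mddev to its freshly-allocated state once the
 * array has been stopped, so that the device can be reused for a new
 * array.
 */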
NeilBrownfd01b882011-10-11 16:47:53 +11006181static void md_clean(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006182{
6183 mddev->array_sectors = 0;
6184 mddev->external_size = 0;
6185 mddev->dev_sectors = 0;
6186 mddev->raid_disks = 0;
6187 mddev->recovery_cp = 0;
6188 mddev->resync_min = 0;
6189 mddev->resync_max = MaxSector;
6190 mddev->reshape_position = MaxSector;
6191 mddev->external = 0;
6192 mddev->persistent = 0;
6193 mddev->level = LEVEL_NONE;
6194 mddev->clevel[0] = 0;
6195 mddev->flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08006196 mddev->sb_flags = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006197 mddev->ro = 0;
6198 mddev->metadata_type[0] = 0;
6199 mddev->chunk_sectors = 0;
6200 mddev->ctime = mddev->utime = 0;
6201 mddev->layout = 0;
6202 mddev->max_disks = 0;
6203 mddev->events = 0;
NeilBrowna8707c02010-05-18 09:28:43 +10006204 mddev->can_decrease_events = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006205 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10006206 mddev->reshape_backwards = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006207 mddev->new_level = LEVEL_NONE;
6208 mddev->new_layout = 0;
6209 mddev->new_chunk_sectors = 0;
6210 mddev->curr_resync = 0;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11006211 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown6177b472010-03-29 11:37:13 +11006212 mddev->suspend_lo = mddev->suspend_hi = 0;
6213 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6214 mddev->recovery = 0;
6215 mddev->in_sync = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006216 mddev->changed = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006217 mddev->degraded = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006218 mddev->safemode = 0;
NeilBrownbd691922015-06-25 17:01:40 +10006219 mddev->private = NULL;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006220 mddev->cluster_info = NULL;
NeilBrown6177b472010-03-29 11:37:13 +11006221 mddev->bitmap_info.offset = 0;
6222 mddev->bitmap_info.default_offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10006223 mddev->bitmap_info.default_space = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006224 mddev->bitmap_info.chunksize = 0;
6225 mddev->bitmap_info.daemon_sleep = 0;
6226 mddev->bitmap_info.max_write_behind = 0;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006227 mddev->bitmap_info.nodes = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006228}
6229
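/*
 * __md_stop_writes() quiesces all write-out: it freezes recovery,
 * reaps the sync thread, flushes the bitmap, marks the superblock
 * clean when possible and drops the serialization pool.  Callers must
 * hold the mddev lock (see md_stop_writes() below).
 */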
NeilBrownfd01b882011-10-11 16:47:53 +11006230static void __md_stop_writes(struct mddev *mddev)
NeilBrowna047e122010-03-29 12:07:53 +11006231{
NeilBrown6b6204e2013-05-09 09:48:30 +10006232 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Guoqing Jiang21e09582020-04-04 23:57:07 +02006233 if (work_pending(&mddev->del_work))
6234 flush_workqueue(md_misc_wq);
NeilBrowna047e122010-03-29 12:07:53 +11006235 if (mddev->sync_thread) {
NeilBrowna047e122010-03-29 12:07:53 +11006236 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10006237 md_reap_sync_thread(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006238 }
6239
6240 del_timer_sync(&mddev->safemode_timer);
6241
Shaohua Li034e33f2016-11-21 10:29:19 -08006242 if (mddev->pers && mddev->pers->quiesce) {
6243 mddev->pers->quiesce(mddev, 1);
6244 mddev->pers->quiesce(mddev, 0);
6245 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006246 md_bitmap_flush(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006247
NeilBrownb6d428c2013-04-24 11:42:42 +10006248 if (mddev->ro == 0 &&
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006249 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
Shaohua Li29530792016-12-08 15:48:19 -08006250 mddev->sb_flags)) {
NeilBrowna047e122010-03-29 12:07:53 +11006251		/* mark array as cleanly shut down */
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006252 if (!mddev_is_clustered(mddev))
6253 mddev->in_sync = 1;
NeilBrowna047e122010-03-29 12:07:53 +11006254 md_update_sb(mddev, 1);
6255 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +01006256 /* disable policy to guarantee rdevs free resources for serialization */
6257 mddev->serialize_policy = 0;
6258 mddev_destroy_serial_pool(mddev, NULL, true);
NeilBrowna047e122010-03-29 12:07:53 +11006259}
NeilBrowndefad612011-01-14 09:14:33 +11006260
NeilBrownfd01b882011-10-11 16:47:53 +11006261void md_stop_writes(struct mddev *mddev)
NeilBrowndefad612011-01-14 09:14:33 +11006262{
NeilBrown29f097c2013-11-14 17:54:51 +11006263 mddev_lock_nointr(mddev);
NeilBrowndefad612011-01-14 09:14:33 +11006264 __md_stop_writes(mddev);
6265 mddev_unlock(mddev);
6266}
NeilBrown390ee602010-06-01 19:37:27 +10006267EXPORT_SYMBOL_GPL(md_stop_writes);
NeilBrowna047e122010-03-29 12:07:53 +11006268
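/*
 * mddev_detach() quiesces the personality and detaches its helper
 * thread and request queue from the mddev without freeing anything.
 */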
NeilBrown5aa61f42014-12-15 12:56:57 +11006269static void mddev_detach(struct mddev *mddev)
6270{
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006271 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang6b40bec2020-02-11 11:10:04 +01006272 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
NeilBrown5aa61f42014-12-15 12:56:57 +11006273 mddev->pers->quiesce(mddev, 1);
6274 mddev->pers->quiesce(mddev, 0);
6275 }
6276 md_unregister_thread(&mddev->thread);
6277 if (mddev->queue)
6278 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6279}
6280
NeilBrown5eff3c42012-11-19 10:47:48 +11006281static void __md_stop(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006282{
NeilBrown36d091f2014-12-15 12:56:58 +11006283 struct md_personality *pers = mddev->pers;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006284 md_bitmap_destroy(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11006285 mddev_detach(mddev);
NeilBrownee5d0042015-07-22 10:20:07 +10006286 /* Ensure ->event_work is done */
Guoqing Jiang21e09582020-04-04 23:57:07 +02006287 if (mddev->event_work.func)
6288 flush_workqueue(md_misc_wq);
NeilBrown36d091f2014-12-15 12:56:58 +11006289 spin_lock(&mddev->lock);
NeilBrown6177b472010-03-29 11:37:13 +11006290 mddev->pers = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006291 spin_unlock(&mddev->lock);
6292 pers->free(mddev, mddev->private);
NeilBrownbd691922015-06-25 17:01:40 +10006293 mddev->private = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006294 if (pers->sync_request && mddev->to_remove == NULL)
6295 mddev->to_remove = &md_redundancy_group;
6296 module_put(pers->owner);
NeilBrowncca9cf92010-04-01 12:08:16 +11006297 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Jack Wang6aaa58c2018-10-19 16:21:31 +02006298}
6299
6300void md_stop(struct mddev *mddev)
6301{
6302	/* stop the array and free any attached data structures.
6303	 * This is called from dm-raid.
6304 */
6305 __md_stop(mddev);
Kent Overstreetafeee512018-05-20 18:25:52 -04006306 bioset_exit(&mddev->bio_set);
6307 bioset_exit(&mddev->sync_set);
NeilBrown5eff3c42012-11-19 10:47:48 +11006308}
6309
NeilBrown390ee602010-06-01 19:37:27 +10006310EXPORT_SYMBOL_GPL(md_stop);
NeilBrown6177b472010-03-29 11:37:13 +11006311
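/*
 * md_set_readonly() stops writes and switches a running array to
 * read-only (mddev->ro == 1), e.g. for the STOP_ARRAY_RO path.  It
 * fails with -EBUSY while the array is still opened elsewhere or a
 * resync/recovery is running.
 */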
NeilBrowna05b7ea2012-07-19 15:59:18 +10006312static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
NeilBrowna4bd82d2010-03-29 13:23:10 +11006313{
6314 int err = 0;
NeilBrown30b8feb2013-11-14 15:16:17 +11006315 int did_freeze = 0;
6316
6317 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6318 did_freeze = 1;
6319 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6320 md_wakeup_thread(mddev->thread);
6321 }
NeilBrownf851b602014-12-11 10:02:10 +11006322 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006323 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006324 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006325 /* Thread might be blocked waiting for metadata update
6326 * which will now never happen */
6327 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006328
Shaohua Li29530792016-12-08 15:48:19 -08006329 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrown88724bf2015-09-24 14:00:51 +10006330 return -EBUSY;
NeilBrown30b8feb2013-11-14 15:16:17 +11006331 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006332 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6333 &mddev->recovery));
NeilBrown88724bf2015-09-24 14:00:51 +10006334 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08006335 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown30b8feb2013-11-14 15:16:17 +11006336 mddev_lock_nointr(mddev);
6337
NeilBrowna4bd82d2010-03-29 13:23:10 +11006338 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006339 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006340 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006341 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006342 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown30b8feb2013-11-14 15:16:17 +11006343 if (did_freeze) {
6344 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006345 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006346 md_wakeup_thread(mddev->thread);
6347 }
NeilBrowna4bd82d2010-03-29 13:23:10 +11006348 err = -EBUSY;
6349 goto out;
6350 }
6351 if (mddev->pers) {
NeilBrowndefad612011-01-14 09:14:33 +11006352 __md_stop_writes(mddev);
NeilBrowna4bd82d2010-03-29 13:23:10 +11006353
6354 err = -ENXIO;
6355 if (mddev->ro==1)
6356 goto out;
6357 mddev->ro = 1;
6358 set_disk_ro(mddev->gendisk, 1);
6359 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006360 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6361 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006362 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown30b8feb2013-11-14 15:16:17 +11006363 err = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006364 }
6365out:
6366 mutex_unlock(&mddev->open_mutex);
6367 return err;
6368}
6369
NeilBrown9e653b62006-06-26 00:27:58 -07006370/* mode:
6371 * 0 - completely stop and disassemble array
NeilBrown9e653b62006-06-26 00:27:58 -07006372 * 2 - stop but do not disassemble array
6373 */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006374static int do_md_stop(struct mddev *mddev, int mode,
NeilBrowna05b7ea2012-07-19 15:59:18 +10006375 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006376{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006377 struct gendisk *disk = mddev->gendisk;
NeilBrown3cb03002011-10-11 16:45:26 +11006378 struct md_rdev *rdev;
NeilBrown30b8feb2013-11-14 15:16:17 +11006379 int did_freeze = 0;
6380
6381 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6382 did_freeze = 1;
6383 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6384 md_wakeup_thread(mddev->thread);
6385 }
NeilBrownf851b602014-12-11 10:02:10 +11006386 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006387 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006388 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006389 /* Thread might be blocked waiting for metadata update
6390 * which will now never happen */
6391 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006392
NeilBrown30b8feb2013-11-14 15:16:17 +11006393 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006394 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6395 !test_bit(MD_RECOVERY_RUNNING,
6396 &mddev->recovery)));
NeilBrown30b8feb2013-11-14 15:16:17 +11006397 mddev_lock_nointr(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006398
NeilBrownc8c00a62009-08-10 12:50:52 +10006399 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006400 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006401 mddev->sysfs_active ||
6402 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006403 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006404 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown6e17b022010-08-07 21:41:19 +10006405 mutex_unlock(&mddev->open_mutex);
NeilBrown30b8feb2013-11-14 15:16:17 +11006406 if (did_freeze) {
6407 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006408 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006409 md_wakeup_thread(mddev->thread);
6410 }
NeilBrown260fa032013-08-27 16:44:13 +10006411 return -EBUSY;
6412 }
NeilBrown6e17b022010-08-07 21:41:19 +10006413 if (mddev->pers) {
NeilBrowna4bd82d2010-03-29 13:23:10 +11006414 if (mddev->ro)
6415 set_disk_ro(disk, 0);
NeilBrown409c57f2009-03-31 14:39:39 +11006416
NeilBrowndefad612011-01-14 09:14:33 +11006417 __md_stop_writes(mddev);
NeilBrown5eff3c42012-11-19 10:47:48 +11006418 __md_stop(mddev);
NeilBrown6177b472010-03-29 11:37:13 +11006419
NeilBrowna4bd82d2010-03-29 13:23:10 +11006420 /* tell userspace to handle 'inactive' */
NeilBrown00bcb4a2010-06-01 19:37:23 +10006421 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown0d4ca602006-12-10 02:20:44 -08006422
NeilBrowndafb20f2012-03-19 12:46:39 +11006423 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006424 if (rdev->raid_disk >= 0)
6425 sysfs_unlink_rdev(mddev, rdev);
NeilBrownc4647292009-05-07 12:51:06 +10006426
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006427 set_capacity_and_notify(disk, 0);
NeilBrown6e17b022010-08-07 21:41:19 +10006428 mutex_unlock(&mddev->open_mutex);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006429 mddev->changed = 1;
NeilBrown0d4ca602006-12-10 02:20:44 -08006430
NeilBrowna4bd82d2010-03-29 13:23:10 +11006431 if (mddev->ro)
6432 mddev->ro = 0;
NeilBrown6e17b022010-08-07 21:41:19 +10006433 } else
6434 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006435 /*
6436 * Free resources if final stop
6437 */
NeilBrown9e653b62006-06-26 00:27:58 -07006438 if (mode == 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006439 pr_info("md: %s stopped.\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006440
NeilBrownc3d97142009-12-14 12:49:52 +11006441 if (mddev->bitmap_info.file) {
NeilBrown4af1a042014-12-15 12:57:00 +11006442 struct file *f = mddev->bitmap_info.file;
6443 spin_lock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11006444 mddev->bitmap_info.file = NULL;
NeilBrown4af1a042014-12-15 12:57:00 +11006445 spin_unlock(&mddev->lock);
6446 fput(f);
NeilBrown978f9462006-02-02 14:28:05 -08006447 }
NeilBrownc3d97142009-12-14 12:49:52 +11006448 mddev->bitmap_info.offset = 0;
NeilBrown978f9462006-02-02 14:28:05 -08006449
Linus Torvalds1da177e2005-04-16 15:20:36 -07006450 export_array(mddev);
6451
NeilBrown6177b472010-03-29 11:37:13 +11006452 md_clean(mddev);
NeilBrownefeb53c2009-01-09 08:31:10 +11006453 if (mddev->hold_active == UNTIL_STOP)
6454 mddev->hold_active = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006455 }
NeilBrownd7603b72006-01-06 00:20:30 -08006456 md_new_event(mddev);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006457 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown6e17b022010-08-07 21:41:19 +10006458 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006459}
6460
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006461#ifndef MODULE
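/* Run one auto-assembled array; if do_md_run() fails it is stopped again. */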
NeilBrownfd01b882011-10-11 16:47:53 +11006462static void autorun_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006463{
NeilBrown3cb03002011-10-11 16:45:26 +11006464 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465 int err;
6466
NeilBrowna757e642005-04-16 15:26:42 -07006467 if (list_empty(&mddev->disks))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006468 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006469
NeilBrown9d487392016-11-02 14:16:49 +11006470 pr_info("md: running: ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006471
NeilBrowndafb20f2012-03-19 12:46:39 +11006472 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006473 char b[BDEVNAME_SIZE];
NeilBrown9d487392016-11-02 14:16:49 +11006474 pr_cont("<%s>", bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006475 }
NeilBrown9d487392016-11-02 14:16:49 +11006476 pr_cont("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006477
NeilBrownd710e132008-10-13 11:55:12 +11006478 err = do_md_run(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006479 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11006480 pr_warn("md: do_md_run() returned %d\n", err);
NeilBrowna05b7ea2012-07-19 15:59:18 +10006481 do_md_stop(mddev, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006482 }
6483}
6484
6485/*
6486 * let's try to run arrays based on all disks that have arrived
6487 * until now. (those are in pending_raid_disks)
6488 *
6489 * the method: pick the first pending disk, collect all disks with
6490 * the same UUID, remove all from the pending list and put them into
6491 * the 'same_array' list. Then order this list based on superblock
6492 * update time (freshest comes first), kick out 'old' disks and
6493 * compare superblocks. If everything's fine then run it.
6494 *
6495 * If "unit" is allocated, then bump its reference count
6496 */
6497static void autorun_devices(int part)
6498{
NeilBrown3cb03002011-10-11 16:45:26 +11006499 struct md_rdev *rdev0, *rdev, *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11006500 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006501 char b[BDEVNAME_SIZE];
6502
NeilBrown9d487392016-11-02 14:16:49 +11006503 pr_info("md: autorun ...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006504 while (!list_empty(&pending_raid_disks)) {
NeilBrowne8703fe2006-10-03 01:15:59 -07006505 int unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506 dev_t dev;
NeilBrownad01c9e2006-03-27 01:18:07 -08006507 LIST_HEAD(candidates);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508 rdev0 = list_entry(pending_raid_disks.next,
NeilBrown3cb03002011-10-11 16:45:26 +11006509 struct md_rdev, same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510
NeilBrown9d487392016-11-02 14:16:49 +11006511 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006512 INIT_LIST_HEAD(&candidates);
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006513 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006514 if (super_90_load(rdev, rdev0, 0) >= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006515 pr_debug("md: adding %s ...\n",
6516 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006517 list_move(&rdev->same_set, &candidates);
6518 }
6519 /*
6520 * now we have a set of devices, with all of them having
6521 * mostly sane superblocks. It's time to allocate the
6522 * mddev.
6523 */
NeilBrowne8703fe2006-10-03 01:15:59 -07006524 if (part) {
6525 dev = MKDEV(mdp_major,
6526 rdev0->preferred_minor << MdpMinorShift);
6527 unit = MINOR(dev) >> MdpMinorShift;
6528 } else {
6529 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6530 unit = MINOR(dev);
6531 }
6532 if (rdev0->preferred_minor != unit) {
NeilBrown9d487392016-11-02 14:16:49 +11006533 pr_warn("md: unit number in %s is bad: %d\n",
6534 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006535 break;
6536 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006537
Christoph Hellwig28144f92020-10-29 15:58:34 +01006538 md_probe(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006539 mddev = mddev_find(dev);
Neil Brown9bbbca32008-06-28 08:31:17 +10006540 if (!mddev || !mddev->gendisk) {
6541 if (mddev)
6542 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006543 break;
6544 }
NeilBrownf72ffdd2014-09-30 14:23:59 +10006545 if (mddev_lock(mddev))
NeilBrown9d487392016-11-02 14:16:49 +11006546 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006547 else if (mddev->raid_disks || mddev->major_version
6548 || !list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11006549 pr_warn("md: %s already running, cannot run %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006550 mdname(mddev), bdevname(rdev0->bdev,b));
6551 mddev_unlock(mddev);
6552 } else {
NeilBrown9d487392016-11-02 14:16:49 +11006553 pr_debug("md: created %s\n", mdname(mddev));
NeilBrown1ec4a932008-02-06 01:39:53 -08006554 mddev->persistent = 1;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006555 rdev_for_each_list(rdev, tmp, &candidates) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006556 list_del_init(&rdev->same_set);
6557 if (bind_rdev_to_array(rdev, mddev))
6558 export_rdev(rdev);
6559 }
6560 autorun_array(mddev);
6561 mddev_unlock(mddev);
6562 }
6563		/* on success, candidates will be empty; on error
6564		 * it won't be...
6565 */
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006566 rdev_for_each_list(rdev, tmp, &candidates) {
NeilBrown4b809912008-07-21 17:05:25 +10006567 list_del_init(&rdev->same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006568 export_rdev(rdev);
NeilBrown4b809912008-07-21 17:05:25 +10006569 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570 mddev_put(mddev);
6571 }
NeilBrown9d487392016-11-02 14:16:49 +11006572 pr_info("md: ... autorun DONE.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006573}
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006574#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006575
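/*
 * The helpers below service the read-only query ioctls: each snapshots
 * the requested state and copies it out to the user-supplied buffer.
 */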
NeilBrownf72ffdd2014-09-30 14:23:59 +10006576static int get_version(void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006577{
6578 mdu_version_t ver;
6579
6580 ver.major = MD_MAJOR_VERSION;
6581 ver.minor = MD_MINOR_VERSION;
6582 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6583
6584 if (copy_to_user(arg, &ver, sizeof(ver)))
6585 return -EFAULT;
6586
6587 return 0;
6588}
6589
NeilBrownf72ffdd2014-09-30 14:23:59 +10006590static int get_array_info(struct mddev *mddev, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006591{
6592 mdu_array_info_t info;
NeilBrowna9f326e2009-09-23 18:06:41 +10006593 int nr,working,insync,failed,spare;
NeilBrown3cb03002011-10-11 16:45:26 +11006594 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006595
NeilBrown1ca69c42012-10-11 13:37:33 +11006596 nr = working = insync = failed = spare = 0;
6597 rcu_read_lock();
6598 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006599 nr++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006600 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601 failed++;
6602 else {
6603 working++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006604 if (test_bit(In_sync, &rdev->flags))
NeilBrownf72ffdd2014-09-30 14:23:59 +10006605 insync++;
Song Liub347af82016-08-11 17:14:45 -07006606 else if (test_bit(Journal, &rdev->flags))
6607 /* TODO: add journal count to md_u.h */
6608 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006609 else
6610 spare++;
6611 }
6612 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006613 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006614
6615 info.major_version = mddev->major_version;
6616 info.minor_version = mddev->minor_version;
6617 info.patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006618 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619 info.level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11006620 info.size = mddev->dev_sectors / 2;
6621 if (info.size != mddev->dev_sectors / 2) /* overflow */
NeilBrown284ae7c2006-02-03 03:03:40 -08006622 info.size = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006623 info.nr_disks = nr;
6624 info.raid_disks = mddev->raid_disks;
6625 info.md_minor = mddev->md_minor;
6626 info.not_persistent= !mddev->persistent;
6627
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006628 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006629 info.state = 0;
6630 if (mddev->in_sync)
6631 info.state = (1<<MD_SB_CLEAN);
NeilBrownc3d97142009-12-14 12:49:52 +11006632 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown9bd35922014-07-02 11:35:06 +10006633 info.state |= (1<<MD_SB_BITMAP_PRESENT);
Goldwyn Rodriguesca8895d2014-11-26 12:22:03 -06006634 if (mddev_is_clustered(mddev))
6635 info.state |= (1<<MD_SB_CLUSTERED);
NeilBrowna9f326e2009-09-23 18:06:41 +10006636 info.active_disks = insync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006637 info.working_disks = working;
6638 info.failed_disks = failed;
6639 info.spare_disks = spare;
6640
6641 info.layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10006642 info.chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006643
6644 if (copy_to_user(arg, &info, sizeof(info)))
6645 return -EFAULT;
6646
6647 return 0;
6648}
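
/*
 * Illustrative user-space sketch (not part of this file): the structure
 * filled in above is normally fetched with the GET_ARRAY_INFO ioctl on
 * an open md device node ("/dev/md0" below is only an example):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d/%d disks in sync\n",
 *		       info.level, info.active_disks, info.raid_disks);
 */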
6649
NeilBrownf72ffdd2014-09-30 14:23:59 +10006650static int get_bitmap_file(struct mddev *mddev, void __user * arg)
NeilBrown32a76272005-06-21 17:17:14 -07006651{
6652 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
NeilBrownf4ad3d32014-12-15 12:57:00 +11006653 char *ptr;
NeilBrown4af1a042014-12-15 12:57:00 +11006654 int err;
NeilBrown32a76272005-06-21 17:17:14 -07006655
Benjamin Randazzob6878d92015-07-25 16:36:50 +02006656 file = kzalloc(sizeof(*file), GFP_NOIO);
NeilBrown32a76272005-06-21 17:17:14 -07006657 if (!file)
NeilBrown4af1a042014-12-15 12:57:00 +11006658 return -ENOMEM;
NeilBrown32a76272005-06-21 17:17:14 -07006659
NeilBrown32a76272005-06-21 17:17:14 -07006660 err = 0;
NeilBrown4af1a042014-12-15 12:57:00 +11006661 spin_lock(&mddev->lock);
Benjamin Randazzo25eafe12015-07-25 16:36:50 +02006662 /* bitmap enabled */
6663 if (mddev->bitmap_info.file) {
6664 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6665 sizeof(file->pathname));
6666 if (IS_ERR(ptr))
6667 err = PTR_ERR(ptr);
6668 else
6669 memmove(file->pathname, ptr,
6670 sizeof(file->pathname)-(ptr-file->pathname));
6671 }
NeilBrown4af1a042014-12-15 12:57:00 +11006672 spin_unlock(&mddev->lock);
6673
6674 if (err == 0 &&
6675 copy_to_user(arg, file, sizeof(*file)))
NeilBrown32a76272005-06-21 17:17:14 -07006676 err = -EFAULT;
NeilBrown4af1a042014-12-15 12:57:00 +11006677
NeilBrown32a76272005-06-21 17:17:14 -07006678 kfree(file);
6679 return err;
6680}
6681
NeilBrownf72ffdd2014-09-30 14:23:59 +10006682static int get_disk_info(struct mddev *mddev, void __user * arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006683{
6684 mdu_disk_info_t info;
NeilBrown3cb03002011-10-11 16:45:26 +11006685 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006686
6687 if (copy_from_user(&info, arg, sizeof(info)))
6688 return -EFAULT;
6689
NeilBrown1ca69c42012-10-11 13:37:33 +11006690 rcu_read_lock();
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05006691 rdev = md_find_rdev_nr_rcu(mddev, info.number);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006692 if (rdev) {
6693 info.major = MAJOR(rdev->bdev->bd_dev);
6694 info.minor = MINOR(rdev->bdev->bd_dev);
6695 info.raid_disk = rdev->raid_disk;
6696 info.state = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08006697 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006698 info.state |= (1<<MD_DISK_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08006699 else if (test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006700 info.state |= (1<<MD_DISK_ACTIVE);
6701 info.state |= (1<<MD_DISK_SYNC);
6702 }
Shaohua Li9efdca12015-10-12 16:59:50 -07006703 if (test_bit(Journal, &rdev->flags))
Song Liubac624f2015-08-13 14:31:55 -07006704 info.state |= (1<<MD_DISK_JOURNAL);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006705 if (test_bit(WriteMostly, &rdev->flags))
6706 info.state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11006707 if (test_bit(FailFast, &rdev->flags))
6708 info.state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006709 } else {
6710 info.major = info.minor = 0;
6711 info.raid_disk = -1;
6712 info.state = (1<<MD_DISK_REMOVED);
6713 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006714 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006715
6716 if (copy_to_user(arg, &info, sizeof(info)))
6717 return -EFAULT;
6718
6719 return 0;
6720}
6721
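/*
 * md_add_new_disk() backs the ADD_NEW_DISK ioctl.  Three cases are
 * handled below: adding a device to a not-yet-assembled array (its
 * superblock is loaded and checked against the first member), hot
 * adding a spare to a running array, and adding a device described
 * purely by the ioctl arguments, which is only allowed for version-0
 * superblocks.
 */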
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006722int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723{
6724 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006725 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006726 dev_t dev = MKDEV(info->major,info->minor);
6727
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006728 if (mddev_is_clustered(mddev) &&
6729 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
NeilBrown9d487392016-11-02 14:16:49 +11006730 pr_warn("%s: Cannot add to clustered mddev.\n",
6731 mdname(mddev));
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006732 return -EINVAL;
6733 }
6734
Linus Torvalds1da177e2005-04-16 15:20:36 -07006735 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6736 return -EOVERFLOW;
6737
6738 if (!mddev->raid_disks) {
6739 int err;
6740 /* expecting a device which has a superblock */
6741 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6742 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006743 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006744 PTR_ERR(rdev));
6745 return PTR_ERR(rdev);
6746 }
6747 if (!list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11006748 struct md_rdev *rdev0
6749 = list_entry(mddev->disks.next,
6750 struct md_rdev, same_set);
NeilBrowna9f326e2009-09-23 18:06:41 +10006751 err = super_types[mddev->major_version]
Linus Torvalds1da177e2005-04-16 15:20:36 -07006752 .load_super(rdev, rdev0, mddev->minor_version);
6753 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006754 pr_warn("md: %s has different UUID to %s\n",
NeilBrownf72ffdd2014-09-30 14:23:59 +10006755 bdevname(rdev->bdev,b),
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756 bdevname(rdev0->bdev,b2));
6757 export_rdev(rdev);
6758 return -EINVAL;
6759 }
6760 }
6761 err = bind_rdev_to_array(rdev, mddev);
6762 if (err)
6763 export_rdev(rdev);
6764 return err;
6765 }
6766
6767 /*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006768 * md_add_new_disk can be used once the array is assembled
Linus Torvalds1da177e2005-04-16 15:20:36 -07006769 * to add "hot spares". They must already have a superblock
6770 * written
6771 */
6772 if (mddev->pers) {
6773 int err;
6774 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006775 pr_warn("%s: personality does not support diskops!\n",
6776 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006777 return -EINVAL;
6778 }
NeilBrown7b1e35f2005-09-09 16:23:50 -07006779 if (mddev->persistent)
6780 rdev = md_import_device(dev, mddev->major_version,
6781 mddev->minor_version);
6782 else
6783 rdev = md_import_device(dev, -1, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006784 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006785 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006786 PTR_ERR(rdev));
6787 return PTR_ERR(rdev);
6788 }
NeilBrown1a855a02010-12-09 16:36:28 +11006789 /* set saved_raid_disk if appropriate */
NeilBrown41158c72005-06-21 17:17:25 -07006790 if (!mddev->persistent) {
6791 if (info->state & (1<<MD_DISK_SYNC) &&
NeilBrownbf572542011-01-12 09:03:35 +11006792 info->raid_disk < mddev->raid_disks) {
NeilBrown41158c72005-06-21 17:17:25 -07006793 rdev->raid_disk = info->raid_disk;
NeilBrownbf572542011-01-12 09:03:35 +11006794 set_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11006795 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownbf572542011-01-12 09:03:35 +11006796 } else
NeilBrown41158c72005-06-21 17:17:25 -07006797 rdev->raid_disk = -1;
NeilBrownf4667222013-12-09 12:04:56 +11006798 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrown41158c72005-06-21 17:17:25 -07006799 } else
6800 super_types[mddev->major_version].
6801 validate_super(mddev, rdev);
NeilBrownbedd86b2011-05-11 14:26:20 +10006802 if ((info->state & (1<<MD_DISK_SYNC)) &&
NeilBrownf4563092012-07-03 15:59:06 +10006803 rdev->raid_disk != info->raid_disk) {
NeilBrownbedd86b2011-05-11 14:26:20 +10006804			/* This was a hot-add request, but the event count doesn't
6805 * match, so reject it.
6806 */
6807 export_rdev(rdev);
6808 return -EINVAL;
6809 }
6810
NeilBrownb2d444d2005-11-08 21:39:31 -08006811 clear_bit(In_sync, &rdev->flags); /* just to be sure */
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006812 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6813 set_bit(WriteMostly, &rdev->flags);
NeilBrown575a80f2009-03-31 14:33:13 +11006814 else
6815 clear_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006816 if (info->state & (1<<MD_DISK_FAILFAST))
6817 set_bit(FailFast, &rdev->flags);
6818 else
6819 clear_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006820
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006821 if (info->state & (1<<MD_DISK_JOURNAL)) {
6822 struct md_rdev *rdev2;
6823 bool has_journal = false;
6824
6825 /* make sure no existing journal disk */
6826 rdev_for_each(rdev2, mddev) {
6827 if (test_bit(Journal, &rdev2->flags)) {
6828 has_journal = true;
6829 break;
6830 }
6831 }
NeilBrown230b55f2017-10-17 14:24:09 +11006832 if (has_journal || mddev->bitmap) {
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006833 export_rdev(rdev);
6834 return -EBUSY;
6835 }
Song Liubac624f2015-08-13 14:31:55 -07006836 set_bit(Journal, &rdev->flags);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006837 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006838 /*
6839 * check whether the device shows up in other nodes
6840 */
6841 if (mddev_is_clustered(mddev)) {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006842 if (info->state & (1 << MD_DISK_CANDIDATE))
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006843 set_bit(Candidate, &rdev->flags);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006844 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006845 /* --add initiated by this node */
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006846 err = md_cluster_ops->add_new_disk(mddev, rdev);
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006847 if (err) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006848 export_rdev(rdev);
6849 return err;
6850 }
6851 }
6852 }
6853
Linus Torvalds1da177e2005-04-16 15:20:36 -07006854 rdev->raid_disk = -1;
6855 err = bind_rdev_to_array(rdev, mddev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006856
Linus Torvalds1da177e2005-04-16 15:20:36 -07006857 if (err)
6858 export_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006859
6860 if (mddev_is_clustered(mddev)) {
Guoqing Jiange566aef2016-08-12 13:42:34 +08006861 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6862 if (!err) {
6863 err = md_cluster_ops->new_disk_ack(mddev,
6864 err == 0);
6865 if (err)
6866 md_kick_rdev_from_array(rdev);
6867 }
6868 } else {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006869 if (err)
6870 md_cluster_ops->add_new_disk_cancel(mddev);
6871 else
6872 err = add_bound_rdev(rdev);
6873 }
6874
6875 } else if (!err)
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05006876 err = add_bound_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006877
Linus Torvalds1da177e2005-04-16 15:20:36 -07006878 return err;
6879 }
6880
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006881 /* otherwise, md_add_new_disk is only allowed
Linus Torvalds1da177e2005-04-16 15:20:36 -07006882 * for major_version==0 superblocks
6883 */
6884 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006885 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006886 return -EINVAL;
6887 }
6888
6889 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6890 int err;
NeilBrownd710e132008-10-13 11:55:12 +11006891 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006892 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006893 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006894 PTR_ERR(rdev));
6895 return PTR_ERR(rdev);
6896 }
6897 rdev->desc_nr = info->number;
6898 if (info->raid_disk < mddev->raid_disks)
6899 rdev->raid_disk = info->raid_disk;
6900 else
6901 rdev->raid_disk = -1;
6902
Linus Torvalds1da177e2005-04-16 15:20:36 -07006903 if (rdev->raid_disk < mddev->raid_disks)
NeilBrownb2d444d2005-11-08 21:39:31 -08006904 if (info->state & (1<<MD_DISK_SYNC))
6905 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006906
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006907 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6908 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006909 if (info->state & (1<<MD_DISK_FAILFAST))
6910 set_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006911
Linus Torvalds1da177e2005-04-16 15:20:36 -07006912 if (!mddev->persistent) {
NeilBrown9d487392016-11-02 14:16:49 +11006913 pr_debug("md: nonpersistent superblock ...\n");
Mike Snitzer77304d22010-11-08 14:39:12 +01006914 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6915 } else
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006916 rdev->sb_start = calc_dev_sboffset(rdev);
NeilBrown8190e752009-06-18 08:48:58 +10006917 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006918
NeilBrown2bf071b2006-01-06 00:20:55 -08006919 err = bind_rdev_to_array(rdev, mddev);
6920 if (err) {
6921 export_rdev(rdev);
6922 return err;
6923 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006924 }
6925
6926 return 0;
6927}
6928
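/*
 * hot_remove_disk() detaches device @dev from a running array; it
 * fails with -EBUSY while the device is still an active member.
 */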
NeilBrownf72ffdd2014-09-30 14:23:59 +10006929static int hot_remove_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006930{
6931 char b[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006932 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006933
Yufen Yuc42a0e22018-05-04 18:08:10 +08006934 if (!mddev->pers)
6935 return -ENODEV;
6936
Linus Torvalds1da177e2005-04-16 15:20:36 -07006937 rdev = find_rdev(mddev, dev);
6938 if (!rdev)
6939 return -ENXIO;
6940
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006941 if (rdev->raid_disk < 0)
6942 goto kick_rdev;
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006943
NeilBrown3ea8929d2013-04-24 11:42:41 +10006944 clear_bit(Blocked, &rdev->flags);
6945 remove_and_add_spares(mddev, rdev);
6946
Linus Torvalds1da177e2005-04-16 15:20:36 -07006947 if (rdev->raid_disk >= 0)
6948 goto busy;
6949
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006950kick_rdev:
Zhao Hemingbca5b062020-11-19 19:41:34 +08006951 if (mddev_is_clustered(mddev)) {
6952 if (md_cluster_ops->remove_disk(mddev, rdev))
6953 goto busy;
6954 }
Goldwyn Rodrigues88bcfef2015-04-14 10:44:44 -05006955
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05006956 md_kick_rdev_from_array(rdev);
Shaohua Li29530792016-12-08 15:48:19 -08006957 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11006958 if (mddev->thread)
6959 md_wakeup_thread(mddev->thread);
6960 else
6961 md_update_sb(mddev, 1);
NeilBrownd7603b72006-01-06 00:20:30 -08006962 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006963
6964 return 0;
6965busy:
NeilBrown9d487392016-11-02 14:16:49 +11006966 pr_debug("md: cannot remove active disk %s from %s ...\n",
6967 bdevname(rdev->bdev,b), mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006968 return -EBUSY;
6969}
6970
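/*
 * hot_add_disk() adds @dev as a spare to a running version-0 array and
 * kicks recovery so that the spare can be pulled in immediately.
 */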
NeilBrownf72ffdd2014-09-30 14:23:59 +10006971static int hot_add_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006972{
6973 char b[BDEVNAME_SIZE];
6974 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11006975 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006976
6977 if (!mddev->pers)
6978 return -ENODEV;
6979
6980 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006981 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006982 mdname(mddev));
6983 return -EINVAL;
6984 }
6985 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006986 pr_warn("%s: personality does not support diskops!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006987 mdname(mddev));
6988 return -EINVAL;
6989 }
6990
NeilBrownd710e132008-10-13 11:55:12 +11006991 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006992 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006993 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994 PTR_ERR(rdev));
6995 return -EINVAL;
6996 }
6997
6998 if (mddev->persistent)
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006999 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007000 else
Mike Snitzer77304d22010-11-08 14:39:12 +01007001 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007002
NeilBrown8190e752009-06-18 08:48:58 +10007003 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007004
NeilBrownb2d444d2005-11-08 21:39:31 -08007005 if (test_bit(Faulty, &rdev->flags)) {
NeilBrown9d487392016-11-02 14:16:49 +11007006 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007007 bdevname(rdev->bdev,b), mdname(mddev));
7008 err = -EINVAL;
7009 goto abort_export;
7010 }
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007011
NeilBrownb2d444d2005-11-08 21:39:31 -08007012 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007013 rdev->desc_nr = -1;
NeilBrown58427302006-10-06 00:44:04 -07007014 rdev->saved_raid_disk = -1;
NeilBrown2bf071b2006-01-06 00:20:55 -08007015 err = bind_rdev_to_array(rdev, mddev);
7016 if (err)
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05007017 goto abort_export;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007018
7019 /*
7020	 * The rest had better be atomic: disk failures can be noticed
7021	 * in interrupt context ...
7022 */
7023
Linus Torvalds1da177e2005-04-16 15:20:36 -07007024 rdev->raid_disk = -1;
7025
Shaohua Li29530792016-12-08 15:48:19 -08007026 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11007027 if (!mddev->thread)
7028 md_update_sb(mddev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007029 /*
7030 * Kick recovery, maybe this spare has to be added to the
7031 * array immediately.
7032 */
7033 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7034 md_wakeup_thread(mddev->thread);
NeilBrownd7603b72006-01-06 00:20:30 -08007035 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007036 return 0;
7037
Linus Torvalds1da177e2005-04-16 15:20:36 -07007038abort_export:
7039 export_rdev(rdev);
7040 return err;
7041}
7042
NeilBrownfd01b882011-10-11 16:47:53 +11007043static int set_bitmap_file(struct mddev *mddev, int fd)
NeilBrown32a76272005-06-21 17:17:14 -07007044{
NeilBrown035328c2014-04-09 12:25:40 +10007045 int err = 0;
NeilBrown32a76272005-06-21 17:17:14 -07007046
NeilBrown36fa3062005-09-09 16:23:45 -07007047 if (mddev->pers) {
NeilBrownd66b1b32014-08-08 15:40:24 +10007048 if (!mddev->pers->quiesce || !mddev->thread)
NeilBrown36fa3062005-09-09 16:23:45 -07007049 return -EBUSY;
7050 if (mddev->recovery || mddev->sync_thread)
7051 return -EBUSY;
7052 /* we should be able to change the bitmap.. */
NeilBrown32a76272005-06-21 17:17:14 -07007053 }
7054
NeilBrown36fa3062005-09-09 16:23:45 -07007055 if (fd >= 0) {
NeilBrown035328c2014-04-09 12:25:40 +10007056 struct inode *inode;
NeilBrown1e594bb2014-12-15 12:57:00 +11007057 struct file *f;
NeilBrown36fa3062005-09-09 16:23:45 -07007058
NeilBrown1e594bb2014-12-15 12:57:00 +11007059 if (mddev->bitmap || mddev->bitmap_info.file)
7060 return -EEXIST; /* cannot add when bitmap is present */
7061 f = fget(fd);
7062
7063 if (f == NULL) {
NeilBrown9d487392016-11-02 14:16:49 +11007064 pr_warn("%s: error: failed to get bitmap file\n",
7065 mdname(mddev));
NeilBrown36fa3062005-09-09 16:23:45 -07007066 return -EBADF;
7067 }
7068
NeilBrown1e594bb2014-12-15 12:57:00 +11007069 inode = f->f_mapping->host;
NeilBrown035328c2014-04-09 12:25:40 +10007070 if (!S_ISREG(inode->i_mode)) {
NeilBrown9d487392016-11-02 14:16:49 +11007071 pr_warn("%s: error: bitmap file must be a regular file\n",
7072 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007073 err = -EBADF;
NeilBrown1e594bb2014-12-15 12:57:00 +11007074 } else if (!(f->f_mode & FMODE_WRITE)) {
NeilBrown9d487392016-11-02 14:16:49 +11007075			pr_warn("%s: error: bitmap file must be open for write\n",
7076 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007077 err = -EBADF;
7078 } else if (atomic_read(&inode->i_writecount) != 1) {
NeilBrown9d487392016-11-02 14:16:49 +11007079 pr_warn("%s: error: bitmap file is already in use\n",
7080 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007081 err = -EBUSY;
7082 }
7083 if (err) {
NeilBrown1e594bb2014-12-15 12:57:00 +11007084 fput(f);
NeilBrown36fa3062005-09-09 16:23:45 -07007085 return err;
7086 }
NeilBrown1e594bb2014-12-15 12:57:00 +11007087 mddev->bitmap_info.file = f;
NeilBrownc3d97142009-12-14 12:49:52 +11007088 mddev->bitmap_info.offset = 0; /* file overrides offset */
NeilBrown36fa3062005-09-09 16:23:45 -07007089 } else if (mddev->bitmap == NULL)
7090 return -ENOENT; /* cannot remove what isn't there */
7091 err = 0;
7092 if (mddev->pers) {
NeilBrown69e51b42010-06-01 19:37:35 +10007093 if (fd >= 0) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007094 struct bitmap *bitmap;
7095
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007096 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007097 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007098 if (!IS_ERR(bitmap)) {
7099 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007100 err = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007101 } else
7102 err = PTR_ERR(bitmap);
NeilBrown52a0d492017-10-17 13:46:43 +11007103 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007104 md_bitmap_destroy(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007105 fd = -1;
7106 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007107 mddev_resume(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007108 } else if (fd < 0) {
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007109 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007110 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007111 mddev_resume(mddev);
NeilBrownd7375ab2006-06-26 00:27:43 -07007112 }
NeilBrownd7375ab2006-06-26 00:27:43 -07007113 }
7114 if (fd < 0) {
NeilBrown4af1a042014-12-15 12:57:00 +11007115 struct file *f = mddev->bitmap_info.file;
7116 if (f) {
7117 spin_lock(&mddev->lock);
7118 mddev->bitmap_info.file = NULL;
7119 spin_unlock(&mddev->lock);
7120 fput(f);
7121 }
NeilBrown36fa3062005-09-09 16:23:45 -07007122 }
7123
NeilBrown32a76272005-06-21 17:17:14 -07007124 return err;
7125}
7126
Linus Torvalds1da177e2005-04-16 15:20:36 -07007127/*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007128 * md_set_array_info is used in two different ways
Linus Torvalds1da177e2005-04-16 15:20:36 -07007129 * The original usage is when creating a new array.
7130 * In this usage, raid_disks is > 0 and it together with
7131 * level, size, not_persistent, layout, chunksize determine the
7132 * shape of the array.
7133 * This will always create an array with a type-0.90.0 superblock.
7134 * The newer usage is when assembling an array.
7135 * In this case raid_disks will be 0, and the major_version field is
7136 * used to determine which style super-blocks are to be found on the devices.
7137 * The minor and patch _version numbers are also kept in case the
7138 * super_block handler wishes to interpret them.
7139 */
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007140int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007141{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007142 if (info->raid_disks == 0) {
7143 /* just setting version number for superblock loading */
7144 if (info->major_version < 0 ||
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07007145 info->major_version >= ARRAY_SIZE(super_types) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07007146 super_types[info->major_version].name == NULL) {
7147 /* maybe try to auto-load a module? */
NeilBrown9d487392016-11-02 14:16:49 +11007148 pr_warn("md: superblock version %d not known\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149 info->major_version);
7150 return -EINVAL;
7151 }
7152 mddev->major_version = info->major_version;
7153 mddev->minor_version = info->minor_version;
7154 mddev->patch_version = info->patch_version;
NeilBrown3f9d7b02006-12-22 01:11:41 -08007155 mddev->persistent = !info->not_persistent;
NeilBrowncbd19982009-12-30 12:08:49 +11007156 /* ensure mddev_put doesn't delete this now that there
7157 * is some minimal configuration.
7158 */
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007159 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160 return 0;
7161 }
7162 mddev->major_version = MD_MAJOR_VERSION;
7163 mddev->minor_version = MD_MINOR_VERSION;
7164 mddev->patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007165 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007166
7167 mddev->level = info->level;
NeilBrown17115e02006-01-16 22:14:57 -08007168 mddev->clevel[0] = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11007169 mddev->dev_sectors = 2 * (sector_t)info->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170 mddev->raid_disks = info->raid_disks;
7171 /* don't set md_minor, it is determined by which /dev/md* was
7172	 * opened
7173 */
7174 if (info->state & (1<<MD_SB_CLEAN))
7175 mddev->recovery_cp = MaxSector;
7176 else
7177 mddev->recovery_cp = 0;
7178 mddev->persistent = ! info->not_persistent;
NeilBrowne6910632008-02-06 01:39:51 -08007179 mddev->external = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180
7181 mddev->layout = info->layout;
NeilBrown33f2c352019-09-09 16:52:29 +10007182 if (mddev->level == 0)
7183 /* Cannot trust RAID0 layout info here */
7184 mddev->layout = -1;
Andre Noll9d8f0362009-06-18 08:45:01 +10007185 mddev->chunk_sectors = info->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007186
Shaohua Li29530792016-12-08 15:48:19 -08007187 if (mddev->persistent) {
NeilBrown1b3bae42017-03-01 07:31:28 +11007188 mddev->max_disks = MD_SB_DISKS;
7189 mddev->flags = 0;
7190 mddev->sb_flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08007191 }
7192 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007193
NeilBrownc3d97142009-12-14 12:49:52 +11007194 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10007195 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrownc3d97142009-12-14 12:49:52 +11007196 mddev->bitmap_info.offset = 0;
NeilBrownb2a27032005-11-28 13:44:12 -08007197
NeilBrownf6705572006-03-27 01:18:11 -08007198 mddev->reshape_position = MaxSector;
7199
Linus Torvalds1da177e2005-04-16 15:20:36 -07007200 /*
7201 * Generate a 128 bit UUID
7202 */
7203 get_random_bytes(mddev->uuid, 16);
7204
NeilBrownf6705572006-03-27 01:18:11 -08007205 mddev->new_level = mddev->level;
Andre Noll664e7c42009-06-18 08:45:27 +10007206 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08007207 mddev->new_layout = mddev->layout;
7208 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007209 mddev->reshape_backwards = 0;
NeilBrownf6705572006-03-27 01:18:11 -08007210
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211 return 0;
7212}
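/*
 * Illustrative userspace sketch (not part of the driver; device path and
 * field values are hypothetical): how a management tool could exercise the
 * two md_set_array_info() modes through the SET_ARRAY_INFO ioctl.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int fd = open("/dev/md0", O_RDWR);
 *	mdu_array_info_t info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.level = 1;			// create: raid_disks > 0, so level,
 *	info.raid_disks = 2;		// size, layout, chunk_size describe
 *	info.chunk_size = 64 * 1024;	// a new v0.90 array
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.major_version = 1;		// assemble: raid_disks == 0, only the
 *	ioctl(fd, SET_ARRAY_INFO, &info);	// superblock style is chosen
 */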
7213
NeilBrownfd01b882011-10-11 16:47:53 +11007214void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
Dan Williams1f403622009-03-31 14:59:03 +11007215{
Shaohua Liefa4b772017-10-18 22:08:13 -07007216 lockdep_assert_held(&mddev->reconfig_mutex);
Dan Williamsb522adc2009-03-31 15:00:31 +11007217
7218 if (mddev->external_size)
7219 return;
7220
Dan Williams1f403622009-03-31 14:59:03 +11007221 mddev->array_sectors = array_sectors;
7222}
7223EXPORT_SYMBOL(md_set_array_sectors);
7224
NeilBrownfd01b882011-10-11 16:47:53 +11007225static int update_size(struct mddev *mddev, sector_t num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007226{
NeilBrown3cb03002011-10-11 16:45:26 +11007227 struct md_rdev *rdev;
NeilBrowna35b0d62006-01-06 00:20:49 -08007228 int rv;
Andre Nolld71f9f82008-07-11 22:02:22 +10007229 int fit = (num_sectors == 0);
Guoqing Jiang818da592017-03-01 16:42:40 +08007230 sector_t old_dev_sectors = mddev->dev_sectors;
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04007231
NeilBrowna35b0d62006-01-06 00:20:49 -08007232 if (mddev->pers->resize == NULL)
7233 return -EINVAL;
Andre Nolld71f9f82008-07-11 22:02:22 +10007234 /* The "num_sectors" is the number of sectors of each device that
7235 * is used. This can only make sense for arrays with redundancy.
7236 * linear and raid0 always use whatever space is available. We can only
7237 * consider changing this number if no resync or reconstruction is
7238 * happening, and if the new size is acceptable. It must fit before the
Andre Noll0f420352008-07-11 22:02:23 +10007239 * sb_start or, if that is <data_offset, it must fit before the size
Andre Nolld71f9f82008-07-11 22:02:22 +10007240 * of each device. If num_sectors is zero, we find the largest size
7241 * that fits.
NeilBrowna35b0d62006-01-06 00:20:49 -08007242 */
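	/*
	 * Example with illustrative numbers: for two member devices of
	 * 1200000 and 1000000 sectors and num_sectors == 0, the loop below
	 * settles on the smallest rdev->sectors (1000000), so the resize
	 * uses 1000000 sectors of every device; asking for more than that
	 * fails with -ENOSPC.
	 */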
NeilBrownf851b602014-12-11 10:02:10 +11007243 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7244 mddev->sync_thread)
NeilBrowna35b0d62006-01-06 00:20:49 -08007245 return -EBUSY;
NeilBrownbd8839e2014-05-28 13:39:21 +10007246 if (mddev->ro)
7247 return -EROFS;
NeilBrowna4a61252012-05-22 13:55:27 +10007248
NeilBrowndafb20f2012-03-19 12:46:39 +11007249 rdev_for_each(rdev, mddev) {
Andre Nolldd8ac332009-03-31 14:33:13 +11007250 sector_t avail = rdev->sectors;
NeilBrown01ab5662006-10-28 10:38:30 -07007251
Andre Nolld71f9f82008-07-11 22:02:22 +10007252 if (fit && (num_sectors == 0 || num_sectors > avail))
7253 num_sectors = avail;
7254 if (avail < num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007255 return -ENOSPC;
7256 }
Andre Nolld71f9f82008-07-11 22:02:22 +10007257 rv = mddev->pers->resize(mddev, num_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007258 if (!rv) {
Guoqing Jiang818da592017-03-01 16:42:40 +08007259 if (mddev_is_clustered(mddev))
7260 md_cluster_ops->update_size(mddev, old_dev_sectors);
7261 else if (mddev->queue) {
Christoph Hellwig2c247c52020-11-16 15:57:11 +01007262 set_capacity_and_notify(mddev->gendisk,
7263 mddev->array_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007264 }
7265 }
NeilBrowna35b0d62006-01-06 00:20:49 -08007266 return rv;
7267}
7268
NeilBrownfd01b882011-10-11 16:47:53 +11007269static int update_raid_disks(struct mddev *mddev, int raid_disks)
NeilBrownda943b992006-01-06 00:20:54 -08007270{
7271 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10007272 struct md_rdev *rdev;
NeilBrownda943b992006-01-06 00:20:54 -08007273 /* change the number of raid disks */
NeilBrown63c70c42006-03-27 01:18:13 -08007274 if (mddev->pers->check_reshape == NULL)
NeilBrownda943b992006-01-06 00:20:54 -08007275 return -EINVAL;
NeilBrownbd8839e2014-05-28 13:39:21 +10007276 if (mddev->ro)
7277 return -EROFS;
NeilBrownda943b992006-01-06 00:20:54 -08007278 if (raid_disks <= 0 ||
NeilBrown233fca32010-04-14 17:02:09 +10007279 (mddev->max_disks && raid_disks >= mddev->max_disks))
NeilBrownda943b992006-01-06 00:20:54 -08007280 return -EINVAL;
NeilBrownf851b602014-12-11 10:02:10 +11007281 if (mddev->sync_thread ||
7282 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
Zhao Heminga8da01f2020-11-19 19:41:33 +08007283 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
NeilBrownf851b602014-12-11 10:02:10 +11007284 mddev->reshape_position != MaxSector)
NeilBrownda943b992006-01-06 00:20:54 -08007285 return -EBUSY;
NeilBrownc6563a82012-05-21 09:27:00 +10007286
7287 rdev_for_each(rdev, mddev) {
7288 if (mddev->raid_disks < raid_disks &&
7289 rdev->data_offset < rdev->new_data_offset)
7290 return -EINVAL;
7291 if (mddev->raid_disks > raid_disks &&
7292 rdev->data_offset > rdev->new_data_offset)
7293 return -EINVAL;
7294 }
7295
NeilBrown63c70c42006-03-27 01:18:13 -08007296 mddev->delta_disks = raid_disks - mddev->raid_disks;
NeilBrown2c810cd2012-05-21 09:27:00 +10007297 if (mddev->delta_disks < 0)
7298 mddev->reshape_backwards = 1;
7299 else if (mddev->delta_disks > 0)
7300 mddev->reshape_backwards = 0;
NeilBrown63c70c42006-03-27 01:18:13 -08007301
7302 rv = mddev->pers->check_reshape(mddev);
NeilBrown2c810cd2012-05-21 09:27:00 +10007303 if (rv < 0) {
NeilBrownde171cb2011-01-31 11:57:42 +11007304 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007305 mddev->reshape_backwards = 0;
7306 }
NeilBrownda943b992006-01-06 00:20:54 -08007307 return rv;
7308}
7309
Linus Torvalds1da177e2005-04-16 15:20:36 -07007310/*
7311 * update_array_info is used to change the configuration of an
7312 * on-line array.
7313 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
7314 * fields in the info are checked against the array.
7315 * Any differences that cannot be handled will cause an error.
7316 * Normally, only one change can be managed at a time.
7317 */
NeilBrownfd01b882011-10-11 16:47:53 +11007318static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007319{
7320 int rv = 0;
7321 int cnt = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007322 int state = 0;
7323
7324	 /* calculate expected state, ignoring low bits */
NeilBrownc3d97142009-12-14 12:49:52 +11007325 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown36fa3062005-09-09 16:23:45 -07007326 state |= (1 << MD_SB_BITMAP_PRESENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007327
7328 if (mddev->major_version != info->major_version ||
7329 mddev->minor_version != info->minor_version ||
7330/* mddev->patch_version != info->patch_version || */
7331 mddev->ctime != info->ctime ||
7332 mddev->level != info->level ||
7333/* mddev->layout != info->layout || */
Firo Yang4e023612015-06-11 09:41:10 +08007334 mddev->persistent != !info->not_persistent ||
Andre Noll9d8f0362009-06-18 08:45:01 +10007335 mddev->chunk_sectors != info->chunk_size >> 9 ||
NeilBrown36fa3062005-09-09 16:23:45 -07007336 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7337 ((state^info->state) & 0xfffffe00)
7338 )
Linus Torvalds1da177e2005-04-16 15:20:36 -07007339 return -EINVAL;
7340 /* Check there is only one change */
Andre Noll58c0fed2009-03-31 14:33:13 +11007341 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7342 cnt++;
7343 if (mddev->raid_disks != info->raid_disks)
7344 cnt++;
7345 if (mddev->layout != info->layout)
7346 cnt++;
7347 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7348 cnt++;
7349 if (cnt == 0)
7350 return 0;
7351 if (cnt > 1)
7352 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007353
7354 if (mddev->layout != info->layout) {
7355 /* Change layout
7356 * we don't need to do anything at the md level, the
7357 * personality will take care of it all.
7358 */
NeilBrown50ac1682009-06-18 08:47:55 +10007359 if (mddev->pers->check_reshape == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007360 return -EINVAL;
NeilBrown597a7112009-06-18 08:47:42 +10007361 else {
7362 mddev->new_layout = info->layout;
NeilBrown50ac1682009-06-18 08:47:55 +10007363 rv = mddev->pers->check_reshape(mddev);
NeilBrown597a7112009-06-18 08:47:42 +10007364 if (rv)
7365 mddev->new_layout = mddev->layout;
7366 return rv;
7367 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007368 }
Andre Noll58c0fed2009-03-31 14:33:13 +11007369 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
Andre Nolld71f9f82008-07-11 22:02:22 +10007370 rv = update_size(mddev, (sector_t)info->size * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007371
NeilBrownda943b992006-01-06 00:20:54 -08007372 if (mddev->raid_disks != info->raid_disks)
7373 rv = update_raid_disks(mddev, info->raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374
NeilBrown36fa3062005-09-09 16:23:45 -07007375 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007376 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7377 rv = -EINVAL;
7378 goto err;
7379 }
7380 if (mddev->recovery || mddev->sync_thread) {
7381 rv = -EBUSY;
7382 goto err;
7383 }
NeilBrown36fa3062005-09-09 16:23:45 -07007384 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007385 struct bitmap *bitmap;
NeilBrown36fa3062005-09-09 16:23:45 -07007386 /* add the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007387 if (mddev->bitmap) {
7388 rv = -EEXIST;
7389 goto err;
7390 }
7391 if (mddev->bitmap_info.default_offset == 0) {
7392 rv = -EINVAL;
7393 goto err;
7394 }
NeilBrownc3d97142009-12-14 12:49:52 +11007395 mddev->bitmap_info.offset =
7396 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10007397 mddev->bitmap_info.space =
7398 mddev->bitmap_info.default_space;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007399 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007400 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007401 if (!IS_ERR(bitmap)) {
7402 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007403 rv = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007404 } else
7405 rv = PTR_ERR(bitmap);
NeilBrown36fa3062005-09-09 16:23:45 -07007406 if (rv)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007407 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007408 mddev_resume(mddev);
NeilBrown36fa3062005-09-09 16:23:45 -07007409 } else {
7410 /* remove the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007411 if (!mddev->bitmap) {
7412 rv = -ENOENT;
7413 goto err;
7414 }
7415 if (mddev->bitmap->storage.file) {
7416 rv = -EINVAL;
7417 goto err;
7418 }
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007419 if (mddev->bitmap_info.nodes) {
7420 /* hold PW on all the bitmap lock */
7421 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11007422 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007423 rv = -EPERM;
7424 md_cluster_ops->unlock_all_bitmaps(mddev);
7425 goto err;
7426 }
7427
7428 mddev->bitmap_info.nodes = 0;
7429 md_cluster_ops->leave(mddev);
Zhao Hemingedee9df2020-07-21 02:08:53 +08007430 module_put(md_cluster_mod);
Zhao Heming7c9d5c52020-07-21 02:08:52 +08007431 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007432 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007433 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007434 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007435 mddev_resume(mddev);
NeilBrownc3d97142009-12-14 12:49:52 +11007436 mddev->bitmap_info.offset = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007437 }
7438 }
NeilBrown850b2b422006-10-03 01:15:46 -07007439 md_update_sb(mddev, 1);
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007440 return rv;
7441err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007442 return rv;
7443}
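/*
 * Illustrative userspace sketch (hypothetical descriptor and values): since
 * update_array_info() accepts only one change per call, a tool that wants to
 * both grow the component size and add an internal bitmap issues two
 * separate SET_ARRAY_INFO ioctls:
 *
 *	mdu_array_info_t info;
 *
 *	ioctl(fd, GET_ARRAY_INFO, &info);
 *	info.size = new_size_kib;			// change #1: size only
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *
 *	ioctl(fd, GET_ARRAY_INFO, &info);
 *	info.state |= (1 << MD_SB_BITMAP_PRESENT);	// change #2: bitmap only
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 */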
7444
NeilBrownfd01b882011-10-11 16:47:53 +11007445static int set_disk_faulty(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007446{
NeilBrown3cb03002011-10-11 16:45:26 +11007447 struct md_rdev *rdev;
NeilBrown1ca69c42012-10-11 13:37:33 +11007448 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007449
7450 if (mddev->pers == NULL)
7451 return -ENODEV;
7452
NeilBrown1ca69c42012-10-11 13:37:33 +11007453 rcu_read_lock();
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +01007454 rdev = md_find_rdev_rcu(mddev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007455 if (!rdev)
NeilBrown1ca69c42012-10-11 13:37:33 +11007456 err = -ENODEV;
7457 else {
7458 md_error(mddev, rdev);
7459 if (!test_bit(Faulty, &rdev->flags))
7460 err = -EBUSY;
7461 }
7462 rcu_read_unlock();
7463 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007464}
7465
Andre Noll2f9618c2008-04-25 18:57:58 +02007466/*
7467 * We have a problem here: there is no easy way to give a CHS
7468 * virtual geometry. We currently pretend that we have 2 heads,
7469 * 4 sectors (with a BIG number of cylinders...). This drives
7470 * dosfs just mad... ;-)
7471 */
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007472static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7473{
NeilBrownfd01b882011-10-11 16:47:53 +11007474 struct mddev *mddev = bdev->bd_disk->private_data;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007475
7476 geo->heads = 2;
7477 geo->sectors = 4;
NeilBrown49ce6ce2010-03-29 10:51:42 +11007478 geo->cylinders = mddev->array_sectors / 8;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007479 return 0;
7480}
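/*
 * Worked example of the fake geometry: with heads = 2 and sectors = 4 there
 * are 8 sectors per cylinder, so a 1 TiB array (2147483648 512-byte sectors)
 * reports 2147483648 / 8 = 268435456 cylinders.
 */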
7481
Nicolas Schichancb335f82014-01-15 16:58:52 +01007482static inline bool md_ioctl_valid(unsigned int cmd)
7483{
7484 switch (cmd) {
7485 case ADD_NEW_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007486 case GET_ARRAY_INFO:
7487 case GET_BITMAP_FILE:
7488 case GET_DISK_INFO:
7489 case HOT_ADD_DISK:
7490 case HOT_REMOVE_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007491 case RAID_VERSION:
7492 case RESTART_ARRAY_RW:
7493 case RUN_ARRAY:
7494 case SET_ARRAY_INFO:
7495 case SET_BITMAP_FILE:
7496 case SET_DISK_FAULTY:
7497 case STOP_ARRAY:
7498 case STOP_ARRAY_RO:
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007499 case CLUSTERED_DISK_NACK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007500 return true;
7501 default:
7502 return false;
7503 }
7504}
7505
Al Viroa39907f2008-03-02 10:31:15 -05007506static int md_ioctl(struct block_device *bdev, fmode_t mode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007507 unsigned int cmd, unsigned long arg)
7508{
7509 int err = 0;
7510 void __user *argp = (void __user *)arg;
NeilBrownfd01b882011-10-11 16:47:53 +11007511 struct mddev *mddev = NULL;
NeilBrown065e5192017-04-06 11:16:33 +08007512 bool did_set_md_closing = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513
Nicolas Schichancb335f82014-01-15 16:58:52 +01007514 if (!md_ioctl_valid(cmd))
7515 return -ENOTTY;
7516
NeilBrown506c9e42011-12-23 10:17:26 +11007517 switch (cmd) {
7518 case RAID_VERSION:
7519 case GET_ARRAY_INFO:
7520 case GET_DISK_INFO:
7521 break;
7522 default:
7523 if (!capable(CAP_SYS_ADMIN))
7524 return -EACCES;
7525 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007526
7527 /*
7528 * Commands dealing with the RAID driver but not any
7529 * particular array:
7530 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007531 switch (cmd) {
7532 case RAID_VERSION:
7533 err = get_version(argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007534 goto out;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007535 default:;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007536 }
7537
7538 /*
7539 * Commands creating/starting a new array:
7540 */
7541
Al Viroa39907f2008-03-02 10:31:15 -05007542 mddev = bdev->bd_disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543
7544 if (!mddev) {
7545 BUG();
NeilBrown3adc28d2014-09-30 15:46:41 +10007546 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007547 }
7548
NeilBrown1ca69c42012-10-11 13:37:33 +11007549	/* Some actions do not require the mutex */
7550 switch (cmd) {
7551 case GET_ARRAY_INFO:
7552 if (!mddev->raid_disks && !mddev->external)
7553 err = -ENODEV;
7554 else
7555 err = get_array_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007556 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007557
7558 case GET_DISK_INFO:
7559 if (!mddev->raid_disks && !mddev->external)
7560 err = -ENODEV;
7561 else
7562 err = get_disk_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007563 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007564
7565 case SET_DISK_FAULTY:
7566 err = set_disk_faulty(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007567 goto out;
NeilBrown4af1a042014-12-15 12:57:00 +11007568
7569 case GET_BITMAP_FILE:
7570 err = get_bitmap_file(mddev, argp);
7571 goto out;
7572
NeilBrown1ca69c42012-10-11 13:37:33 +11007573 }
7574
Guoqing Jiang78b990c2020-04-04 23:57:10 +02007575 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02007576 flush_rdev_wq(mddev);
NeilBrowna7a3f082012-12-11 13:35:54 +11007577
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007578 if (cmd == HOT_REMOVE_DISK)
7579 /* need to ensure recovery thread has run */
7580 wait_event_interruptible_timeout(mddev->sb_wait,
7581 !test_bit(MD_RECOVERY_NEEDED,
Shaohua Li82a301c2016-12-08 15:48:18 -08007582 &mddev->recovery),
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007583 msecs_to_jiffies(5000));
NeilBrown260fa032013-08-27 16:44:13 +10007584 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7585 /* Need to flush page cache, and ensure no-one else opens
7586 * and writes
7587 */
7588 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10007589 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
NeilBrown260fa032013-08-27 16:44:13 +10007590 mutex_unlock(&mddev->open_mutex);
7591 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007592 goto out;
NeilBrown260fa032013-08-27 16:44:13 +10007593 }
Dae R. Jeongc731b842020-10-22 10:21:28 +09007594 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7595 mutex_unlock(&mddev->open_mutex);
7596 err = -EBUSY;
7597 goto out;
7598 }
NeilBrown065e5192017-04-06 11:16:33 +08007599 did_set_md_closing = true;
NeilBrown260fa032013-08-27 16:44:13 +10007600 mutex_unlock(&mddev->open_mutex);
7601 sync_blockdev(bdev);
7602 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007603 err = mddev_lock(mddev);
7604 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007605 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7606 err, cmd);
NeilBrown3adc28d2014-09-30 15:46:41 +10007607 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007608 }
7609
NeilBrownc02c0ae2012-12-11 13:39:21 +11007610 if (cmd == SET_ARRAY_INFO) {
7611 mdu_array_info_t info;
7612 if (!arg)
7613 memset(&info, 0, sizeof(info));
7614 else if (copy_from_user(&info, argp, sizeof(info))) {
7615 err = -EFAULT;
NeilBrown3adc28d2014-09-30 15:46:41 +10007616 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007617 }
7618 if (mddev->pers) {
7619 err = update_array_info(mddev, &info);
7620 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007621 pr_warn("md: couldn't update array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007622 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007623 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007624 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007625 }
7626 if (!list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11007627 pr_warn("md: array %s already has disks!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007628 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007629 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007630 }
7631 if (mddev->raid_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11007632 pr_warn("md: array %s already initialised!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007633 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007634 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007635 }
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007636 err = md_set_array_info(mddev, &info);
NeilBrownc02c0ae2012-12-11 13:39:21 +11007637 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007638 pr_warn("md: couldn't set array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007639 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007640 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007641 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007642 }
7643
7644 /*
7645 * Commands querying/configuring an existing array:
7646 */
NeilBrown32a76272005-06-21 17:17:14 -07007647 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
NeilBrown3f9d7b02006-12-22 01:11:41 -08007648 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
NeilBrowna17184a2008-02-06 01:39:55 -08007649 if ((!mddev->raid_disks && !mddev->external)
7650 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7651 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7652 && cmd != GET_BITMAP_FILE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007653 err = -ENODEV;
NeilBrown3adc28d2014-09-30 15:46:41 +10007654 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007655 }
7656
7657 /*
7658 * Commands even a read-only array can execute:
7659 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007660 switch (cmd) {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007661 case RESTART_ARRAY_RW:
7662 err = restart_array(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007663 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007664
7665 case STOP_ARRAY:
7666 err = do_md_stop(mddev, 0, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007667 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007668
7669 case STOP_ARRAY_RO:
7670 err = md_set_readonly(mddev, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007671 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007672
NeilBrown3ea8929d2013-04-24 11:42:41 +10007673 case HOT_REMOVE_DISK:
7674 err = hot_remove_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007675 goto unlock;
NeilBrown3ea8929d2013-04-24 11:42:41 +10007676
NeilBrown7ceb17e2013-04-24 11:42:42 +10007677 case ADD_NEW_DISK:
7678 /* We can support ADD_NEW_DISK on read-only arrays
Wei Fang466ad292016-03-21 19:19:30 +08007679 * only if we are re-adding a preexisting device.
NeilBrown7ceb17e2013-04-24 11:42:42 +10007680 * So require mddev->pers and MD_DISK_SYNC.
7681 */
7682 if (mddev->pers) {
7683 mdu_disk_info_t info;
7684 if (copy_from_user(&info, argp, sizeof(info)))
7685 err = -EFAULT;
7686 else if (!(info.state & (1<<MD_DISK_SYNC)))
7687 /* Need to clear read-only for this */
7688 break;
7689 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007690 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007691 goto unlock;
NeilBrown7ceb17e2013-04-24 11:42:42 +10007692 }
7693 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007694 }
7695
7696 /*
7697 * The remaining ioctls are changing the state of the
NeilBrownf91de922005-11-08 21:39:36 -08007698 * superblock, so we do not allow them on read-only arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007699 */
NeilBrown326eb172014-09-30 15:36:28 +10007700 if (mddev->ro && mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08007701 if (mddev->ro == 2) {
7702 mddev->ro = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10007703 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10007704 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf3378b42013-02-28 11:59:03 +11007705 /* mddev_unlock will wake thread */
7706 /* If a device failed while we were read-only, we
7707 * need to make sure the metadata is updated now.
7708 */
Shaohua Li29530792016-12-08 15:48:19 -08007709 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
NeilBrownf3378b42013-02-28 11:59:03 +11007710 mddev_unlock(mddev);
7711 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08007712 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7713 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown29f097c2013-11-14 17:54:51 +11007714 mddev_lock_nointr(mddev);
NeilBrownf3378b42013-02-28 11:59:03 +11007715 }
NeilBrownf91de922005-11-08 21:39:36 -08007716 } else {
7717 err = -EROFS;
NeilBrown3adc28d2014-09-30 15:46:41 +10007718 goto unlock;
NeilBrownf91de922005-11-08 21:39:36 -08007719 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007720 }
7721
NeilBrownc02c0ae2012-12-11 13:39:21 +11007722 switch (cmd) {
7723 case ADD_NEW_DISK:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007724 {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007725 mdu_disk_info_t info;
7726 if (copy_from_user(&info, argp, sizeof(info)))
7727 err = -EFAULT;
7728 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007729 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007730 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007731 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007732
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007733 case CLUSTERED_DISK_NACK:
7734 if (mddev_is_clustered(mddev))
7735 md_cluster_ops->new_disk_ack(mddev, false);
7736 else
7737 err = -EINVAL;
7738 goto unlock;
7739
NeilBrownc02c0ae2012-12-11 13:39:21 +11007740 case HOT_ADD_DISK:
7741 err = hot_add_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007742 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007743
NeilBrownc02c0ae2012-12-11 13:39:21 +11007744 case RUN_ARRAY:
7745 err = do_md_run(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007746 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007747
NeilBrownc02c0ae2012-12-11 13:39:21 +11007748 case SET_BITMAP_FILE:
7749 err = set_bitmap_file(mddev, (int)arg);
NeilBrown3adc28d2014-09-30 15:46:41 +10007750 goto unlock;
NeilBrown32a76272005-06-21 17:17:14 -07007751
NeilBrownc02c0ae2012-12-11 13:39:21 +11007752 default:
7753 err = -EINVAL;
NeilBrown3adc28d2014-09-30 15:46:41 +10007754 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007755 }
7756
NeilBrown3adc28d2014-09-30 15:46:41 +10007757unlock:
NeilBrownd3374822009-01-09 08:31:10 +11007758 if (mddev->hold_active == UNTIL_IOCTL &&
7759 err != -EINVAL)
7760 mddev->hold_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007761 mddev_unlock(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007762out:
NeilBrown065e5192017-04-06 11:16:33 +08007763 if(did_set_md_closing)
7764 clear_bit(MD_CLOSING, &mddev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007765 return err;
7766}
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007767#ifdef CONFIG_COMPAT
7768static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7769 unsigned int cmd, unsigned long arg)
7770{
7771 switch (cmd) {
7772 case HOT_REMOVE_DISK:
7773 case HOT_ADD_DISK:
7774 case SET_DISK_FAULTY:
7775 case SET_BITMAP_FILE:
7776 /* These take in integer arg, do not convert */
7777 break;
7778 default:
7779 arg = (unsigned long)compat_ptr(arg);
7780 break;
7781 }
7782
7783 return md_ioctl(bdev, mode, cmd, arg);
7784}
7785#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007786
Christoph Hellwig118cf082020-11-03 11:00:13 +01007787static int md_set_read_only(struct block_device *bdev, bool ro)
7788{
7789 struct mddev *mddev = bdev->bd_disk->private_data;
7790 int err;
7791
7792 err = mddev_lock(mddev);
7793 if (err)
7794 return err;
7795
7796 if (!mddev->raid_disks && !mddev->external) {
7797 err = -ENODEV;
7798 goto out_unlock;
7799 }
7800
7801 /*
7802 * Transitioning to read-auto need only happen for arrays that call
7803 * md_write_start and which are not ready for writes yet.
7804 */
7805 if (!ro && mddev->ro == 1 && mddev->pers) {
7806 err = restart_array(mddev);
7807 if (err)
7808 goto out_unlock;
7809 mddev->ro = 2;
7810 }
7811
7812out_unlock:
7813 mddev_unlock(mddev);
7814 return err;
7815}
7816
Al Viroa39907f2008-03-02 10:31:15 -05007817static int md_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007818{
7819 /*
7820 * Succeed if we can lock the mddev, which confirms that
7821 * it isn't being stopped right now.
7822 */
NeilBrownfd01b882011-10-11 16:47:53 +11007823 struct mddev *mddev = mddev_find(bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007824 int err;
7825
Yuanhan Liu0c098222012-05-22 13:55:32 +10007826 if (!mddev)
7827 return -ENODEV;
7828
NeilBrownd3374822009-01-09 08:31:10 +11007829 if (mddev->gendisk != bdev->bd_disk) {
7830 /* we are racing with mddev_put which is discarding this
7831 * bd_disk.
7832 */
7833 mddev_put(mddev);
7834 /* Wait until bdev->bd_disk is definitely gone */
Guoqing Jiangf6766ff2020-04-04 23:57:09 +02007835 if (work_pending(&mddev->del_work))
7836 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11007837 /* Then retry the open from the top */
7838 return -ERESTARTSYS;
7839 }
7840 BUG_ON(mddev != bdev->bd_disk->private_data);
7841
NeilBrownc8c00a62009-08-10 12:50:52 +10007842 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007843 goto out;
7844
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007845 if (test_bit(MD_CLOSING, &mddev->flags)) {
7846 mutex_unlock(&mddev->open_mutex);
NeilBrowne2342ca2016-12-05 16:40:50 +11007847 err = -ENODEV;
7848 goto out;
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007849 }
7850
Linus Torvalds1da177e2005-04-16 15:20:36 -07007851 err = 0;
NeilBrownf2ea68c2008-07-21 17:05:25 +10007852 atomic_inc(&mddev->openers);
NeilBrownc8c00a62009-08-10 12:50:52 +10007853 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007854
Christoph Hellwig818077d2020-09-08 16:53:43 +02007855 bdev_check_media_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856 out:
NeilBrowne2342ca2016-12-05 16:40:50 +11007857 if (err)
7858 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007859 return err;
7860}
7861
Al Virodb2a1442013-05-05 21:52:57 -04007862static void md_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007863{
NeilBrownf72ffdd2014-09-30 14:23:59 +10007864 struct mddev *mddev = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007865
Eric Sesterhenn52e5f9d2006-10-03 23:33:23 +02007866 BUG_ON(!mddev);
NeilBrownf2ea68c2008-07-21 17:05:25 +10007867 atomic_dec(&mddev->openers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007868 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007869}
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007870
Christoph Hellwiga564e232020-07-08 14:25:41 +02007871static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007872{
NeilBrownfd01b882011-10-11 16:47:53 +11007873 struct mddev *mddev = disk->private_data;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007874 unsigned int ret = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007875
Christoph Hellwiga564e232020-07-08 14:25:41 +02007876 if (mddev->changed)
7877 ret = DISK_EVENT_MEDIA_CHANGE;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007878 mddev->changed = 0;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007879 return ret;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007880}
Christoph Hellwiga564e232020-07-08 14:25:41 +02007881
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007882const struct block_device_operations md_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07007883{
7884 .owner = THIS_MODULE,
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02007885 .submit_bio = md_submit_bio,
Al Viroa39907f2008-03-02 10:31:15 -05007886 .open = md_open,
7887 .release = md_release,
NeilBrownb492b852009-05-26 12:57:36 +10007888 .ioctl = md_ioctl,
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007889#ifdef CONFIG_COMPAT
7890 .compat_ioctl = md_compat_ioctl,
7891#endif
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007892 .getgeo = md_getgeo,
Christoph Hellwiga564e232020-07-08 14:25:41 +02007893 .check_events = md_check_events,
Christoph Hellwig118cf082020-11-03 11:00:13 +01007894 .set_read_only = md_set_read_only,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007895};
7896
NeilBrownf72ffdd2014-09-30 14:23:59 +10007897static int md_thread(void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007898{
NeilBrown2b8bf342011-10-11 16:48:23 +11007899 struct md_thread *thread = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900
Linus Torvalds1da177e2005-04-16 15:20:36 -07007901 /*
7902	 * md_thread is a 'system-thread'; its priority should be very
7903 * high. We avoid resource deadlocks individually in each
7904 * raid personality. (RAID5 does preallocation) We also use RR and
7905 * the very same RT priority as kswapd, thus we will never get
7906 * into a priority inversion deadlock.
7907 *
7908 * we definitely have to have equal or higher priority than
7909 * bdflush, otherwise bdflush will deadlock if there are too
7910 * many dirty RAID5 blocks.
7911 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007912
NeilBrown6985c432005-10-19 21:23:47 -07007913 allow_signal(SIGKILL);
NeilBrowna6fb0932005-09-09 16:23:56 -07007914 while (!kthread_should_stop()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007915
NeilBrown93588e22005-11-15 00:09:12 -08007916 /* We need to wait INTERRUPTIBLE so that
7917 * we don't add to the load-average.
7918 * That means we need to be sure no signals are
7919 * pending
7920 */
7921 if (signal_pending(current))
7922 flush_signals(current);
7923
7924 wait_event_interruptible_timeout
7925 (thread->wqueue,
7926 test_bit(THREAD_WAKEUP, &thread->flags)
Shaohua Lice1ccd02016-11-21 10:29:18 -08007927 || kthread_should_stop() || kthread_should_park(),
NeilBrown93588e22005-11-15 00:09:12 -08007928 thread->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007929
NeilBrown6c987912011-01-14 09:13:53 +11007930 clear_bit(THREAD_WAKEUP, &thread->flags);
Shaohua Lice1ccd02016-11-21 10:29:18 -08007931 if (kthread_should_park())
7932 kthread_parkme();
NeilBrown6c987912011-01-14 09:13:53 +11007933 if (!kthread_should_stop())
Shaohua Li4ed87312012-10-11 13:34:00 +11007934 thread->run(thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007935 }
NeilBrowna6fb0932005-09-09 16:23:56 -07007936
Linus Torvalds1da177e2005-04-16 15:20:36 -07007937 return 0;
7938}
7939
NeilBrown2b8bf342011-10-11 16:48:23 +11007940void md_wakeup_thread(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941{
7942 if (thread) {
NeilBrown36a4e1f2011-10-07 14:23:17 +11007943 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
Guoqing Jiangd1d90142017-10-09 10:32:48 +08007944 set_bit(THREAD_WAKEUP, &thread->flags);
7945 wake_up(&thread->wqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007946 }
7947}
NeilBrown6c144d32014-09-30 16:15:38 +10007948EXPORT_SYMBOL(md_wakeup_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007949
Shaohua Li4ed87312012-10-11 13:34:00 +11007950struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7951 struct mddev *mddev, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007952{
NeilBrown2b8bf342011-10-11 16:48:23 +11007953 struct md_thread *thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007954
NeilBrown2b8bf342011-10-11 16:48:23 +11007955 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007956 if (!thread)
7957 return NULL;
7958
Linus Torvalds1da177e2005-04-16 15:20:36 -07007959 init_waitqueue_head(&thread->wqueue);
7960
Linus Torvalds1da177e2005-04-16 15:20:36 -07007961 thread->run = run;
7962 thread->mddev = mddev;
NeilBrown32a76272005-06-21 17:17:14 -07007963 thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown0da3c612009-09-23 18:09:45 +10007964 thread->tsk = kthread_run(md_thread, thread,
7965 "%s_%s",
7966 mdname(thread->mddev),
NeilBrown02326052012-07-03 15:56:52 +10007967 name);
NeilBrowna6fb0932005-09-09 16:23:56 -07007968 if (IS_ERR(thread->tsk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007969 kfree(thread);
7970 return NULL;
7971 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007972 return thread;
7973}
NeilBrown6c144d32014-09-30 16:15:38 +10007974EXPORT_SYMBOL(md_register_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007975
NeilBrown2b8bf342011-10-11 16:48:23 +11007976void md_unregister_thread(struct md_thread **threadp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007977{
NeilBrown2b8bf342011-10-11 16:48:23 +11007978 struct md_thread *thread = *threadp;
NeilBrowne0cf8f02009-03-31 14:39:39 +11007979 if (!thread)
7980 return;
NeilBrown36a4e1f2011-10-07 14:23:17 +11007981 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
NeilBrown01f96c02011-09-21 15:30:20 +10007982 /* Locking ensures that mddev_unlock does not wake_up a
7983 * non-existent thread
7984 */
7985 spin_lock(&pers_lock);
7986 *threadp = NULL;
7987 spin_unlock(&pers_lock);
NeilBrowna6fb0932005-09-09 16:23:56 -07007988
7989 kthread_stop(thread->tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007990 kfree(thread);
7991}
NeilBrown6c144d32014-09-30 16:15:38 +10007992EXPORT_SYMBOL(md_unregister_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007993
NeilBrownfd01b882011-10-11 16:47:53 +11007994void md_error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007995{
NeilBrownb2d444d2005-11-08 21:39:31 -08007996 if (!rdev || test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007997 return;
Dan Williams6bfe0b42008-04-30 00:52:32 -07007998
NeilBrownde393cd2011-07-28 11:31:48 +10007999 if (!mddev->pers || !mddev->pers->error_handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008000 return;
8001 mddev->pers->error_handler(mddev,rdev);
Neil Brown72a23c22008-06-28 08:31:41 +10008002 if (mddev->degraded)
8003 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown00bcb4a2010-06-01 19:37:23 +10008004 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008005 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8006 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8007 md_wakeup_thread(mddev->thread);
NeilBrown768a4182010-07-26 11:49:55 +10008008 if (mddev->event_work.func)
Tejun Heoe804ac72010-10-15 15:36:08 +02008009 queue_work(md_misc_wq, &mddev->event_work);
Guoqing Jiangbb9ef712015-12-28 10:46:38 +08008010 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008011}
NeilBrown6c144d32014-09-30 16:15:38 +10008012EXPORT_SYMBOL(md_error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008013
8014/* seq_file implementation /proc/mdstat */
8015
8016static void status_unused(struct seq_file *seq)
8017{
8018 int i = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008019 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020
8021 seq_printf(seq, "unused devices: ");
8022
Cheng Renquan159ec1f2009-01-09 08:31:08 +11008023 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008024 char b[BDEVNAME_SIZE];
8025 i++;
8026 seq_printf(seq, "%s ",
8027 bdevname(rdev->bdev,b));
8028 }
8029 if (!i)
8030 seq_printf(seq, "<none>");
8031
8032 seq_printf(seq, "\n");
8033}
8034
NeilBrownf7851be2015-07-02 17:12:58 +10008035static int status_resync(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008036{
NeilBrowndd71cf62009-05-07 12:49:35 +10008037 sector_t max_sectors, resync, res;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008038 unsigned long dt, db = 0;
8039 sector_t rt, curr_mark_cnt, resync_mark_cnt;
8040 int scale, recovery_active;
NeilBrown4588b422006-03-27 01:18:04 -08008041 unsigned int per_milli;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008042
NeilBrownc804cde2012-05-21 09:28:33 +10008043 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8044 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
NeilBrowndd71cf62009-05-07 12:49:35 +10008045 max_sectors = mddev->resync_max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008046 else
NeilBrowndd71cf62009-05-07 12:49:35 +10008047 max_sectors = mddev->dev_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008048
NeilBrownf7851be2015-07-02 17:12:58 +10008049 resync = mddev->curr_resync;
8050 if (resync <= 3) {
8051 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8052 /* Still cleaning up */
8053 resync = max_sectors;
Nate Daileyd2e2ec82017-11-30 11:33:30 -05008054 } else if (resync > max_sectors)
8055 resync = max_sectors;
8056 else
NeilBrownf7851be2015-07-02 17:12:58 +10008057 resync -= atomic_read(&mddev->recovery_active);
8058
8059 if (resync == 0) {
Guoqing Jiang0357ba22018-07-02 16:26:25 +08008060 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8061 struct md_rdev *rdev;
8062
8063 rdev_for_each(rdev, mddev)
8064 if (rdev->raid_disk >= 0 &&
8065 !test_bit(Faulty, &rdev->flags) &&
8066 rdev->recovery_offset != MaxSector &&
8067 rdev->recovery_offset) {
8068 seq_printf(seq, "\trecover=REMOTE");
8069 return 1;
8070 }
8071 if (mddev->reshape_position != MaxSector)
8072 seq_printf(seq, "\treshape=REMOTE");
8073 else
8074 seq_printf(seq, "\tresync=REMOTE");
8075 return 1;
8076 }
NeilBrownf7851be2015-07-02 17:12:58 +10008077 if (mddev->recovery_cp < MaxSector) {
8078 seq_printf(seq, "\tresync=PENDING");
8079 return 1;
8080 }
8081 return 0;
8082 }
8083 if (resync < 3) {
8084 seq_printf(seq, "\tresync=DELAYED");
8085 return 1;
8086 }
8087
NeilBrown403df472014-09-30 15:52:29 +10008088 WARN_ON(max_sectors == 0);
NeilBrown4588b422006-03-27 01:18:04 -08008089 /* Pick 'scale' such that (resync>>scale)*1000 will fit
NeilBrowndd71cf62009-05-07 12:49:35 +10008090 * in a sector_t, and (max_sectors>>scale) will fit in a
NeilBrown4588b422006-03-27 01:18:04 -08008091 * u32, as those are the requirements for sector_div.
8092 * Thus 'scale' must be at least 10
8093 */
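	/*
	 * Worked example (illustrative numbers): with max_sectors = 2000000
	 * and resync = 500000, scale stays at 10, so
	 *	res = (500000 >> 10) * 1000 = 488000
	 *	488000 / ((2000000 >> 10) + 1) = 488000 / 1954 = 249
	 * and per_milli = 249 is later printed as "24.9%".
	 */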
8094 scale = 10;
8095 if (sizeof(sector_t) > sizeof(unsigned long)) {
NeilBrowndd71cf62009-05-07 12:49:35 +10008096 while ( max_sectors/2 > (1ULL<<(scale+32)))
NeilBrown4588b422006-03-27 01:18:04 -08008097 scale++;
8098 }
8099 res = (resync>>scale)*1000;
NeilBrowndd71cf62009-05-07 12:49:35 +10008100 sector_div(res, (u32)((max_sectors>>scale)+1));
NeilBrown4588b422006-03-27 01:18:04 -08008101
8102 per_milli = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008103 {
NeilBrown4588b422006-03-27 01:18:04 -08008104 int i, x = per_milli/50, y = 20-x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008105 seq_printf(seq, "[");
8106 for (i = 0; i < x; i++)
8107 seq_printf(seq, "=");
8108 seq_printf(seq, ">");
8109 for (i = 0; i < y; i++)
8110 seq_printf(seq, ".");
8111 seq_printf(seq, "] ");
8112 }
NeilBrown4588b422006-03-27 01:18:04 -08008113 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
NeilBrownccfcc3c2006-03-27 01:18:09 -08008114 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8115 "reshape" :
NeilBrown61df9d92006-10-03 01:15:57 -07008116 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8117 "check" :
8118 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8119 "resync" : "recovery"))),
8120 per_milli/10, per_milli % 10,
NeilBrowndd71cf62009-05-07 12:49:35 +10008121 (unsigned long long) resync/2,
8122 (unsigned long long) max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008123
8124 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07008125 * dt: time from mark until now
8126 * db: blocks written from mark until now
8127 * rt: remaining time
NeilBrowndd71cf62009-05-07 12:49:35 +10008128 *
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008129	 * rt is a sector_t, which is always 64-bit now. We keep the
8130	 * original algorithm, though it is no longer strictly necessary.
8131 *
8132 * Original algorithm:
8133 * So we divide before multiply in case it is 32bit and close
8134 * to the limit.
8135 * We scale the divisor (db) by 32 to avoid losing precision
8136 * near the end of resync when the number of remaining sectors
8137 * is close to 'db'.
8138 * We then divide rt by 32 after multiplying by db to compensate.
8139 * The '+1' avoids division by zero if db is very small.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008140 */
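	/*
	 * Worked example (illustrative numbers): if dt = 10 seconds,
	 * db = 100000 sectors were covered in that window and
	 * max_sectors - resync = 1500000 sectors remain, then
	 *	rt = 1500000 / (100000/32 + 1) = 479;  rt *= 10;  rt >>= 5;
	 * gives rt = 149 seconds, shown as "finish=2.4min", matching the
	 * plain estimate 1500000 / (100000/10) = 150 seconds up to rounding.
	 */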
8141 dt = ((jiffies - mddev->resync_mark) / HZ);
8142 if (!dt) dt++;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008143
8144 curr_mark_cnt = mddev->curr_mark_cnt;
8145 recovery_active = atomic_read(&mddev->recovery_active);
8146 resync_mark_cnt = mddev->resync_mark_cnt;
8147
8148 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8149 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008150
NeilBrowndd71cf62009-05-07 12:49:35 +10008151 rt = max_sectors - resync; /* number of remaining sectors */
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008152 rt = div64_u64(rt, db/32+1);
NeilBrowndd71cf62009-05-07 12:49:35 +10008153 rt *= dt;
8154 rt >>= 5;
8155
8156 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8157 ((unsigned long)rt % 60)/6);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008158
NeilBrownff4e8d92006-07-10 04:44:16 -07008159 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
NeilBrownf7851be2015-07-02 17:12:58 +10008160 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008161}
8162
8163static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8164{
8165 struct list_head *tmp;
8166 loff_t l = *pos;
NeilBrownfd01b882011-10-11 16:47:53 +11008167 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008168
8169 if (l >= 0x10000)
8170 return NULL;
8171 if (!l--)
8172 /* header */
8173 return (void*)1;
8174
8175 spin_lock(&all_mddevs_lock);
8176 list_for_each(tmp,&all_mddevs)
8177 if (!l--) {
NeilBrownfd01b882011-10-11 16:47:53 +11008178 mddev = list_entry(tmp, struct mddev, all_mddevs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008179 mddev_get(mddev);
8180 spin_unlock(&all_mddevs_lock);
8181 return mddev;
8182 }
8183 spin_unlock(&all_mddevs_lock);
8184 if (!l--)
8185 return (void*)2;/* tail */
8186 return NULL;
8187}
8188
8189static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8190{
8191 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11008192 struct mddev *next_mddev, *mddev = v;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008193
Linus Torvalds1da177e2005-04-16 15:20:36 -07008194 ++*pos;
8195 if (v == (void*)2)
8196 return NULL;
8197
8198 spin_lock(&all_mddevs_lock);
8199 if (v == (void*)1)
8200 tmp = all_mddevs.next;
8201 else
8202 tmp = mddev->all_mddevs.next;
8203 if (tmp != &all_mddevs)
NeilBrownfd01b882011-10-11 16:47:53 +11008204 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008205 else {
8206 next_mddev = (void*)2;
8207 *pos = 0x10000;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008209 spin_unlock(&all_mddevs_lock);
8210
8211 if (v != (void*)1)
8212 mddev_put(mddev);
8213 return next_mddev;
8214
8215}
8216
8217static void md_seq_stop(struct seq_file *seq, void *v)
8218{
NeilBrownfd01b882011-10-11 16:47:53 +11008219 struct mddev *mddev = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008220
8221 if (mddev && v != (void*)1 && v != (void*)2)
8222 mddev_put(mddev);
8223}
8224
8225static int md_seq_show(struct seq_file *seq, void *v)
8226{
NeilBrownfd01b882011-10-11 16:47:53 +11008227 struct mddev *mddev = v;
Andre Nolldd8ac332009-03-31 14:33:13 +11008228 sector_t sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11008229 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008230
8231 if (v == (void*)1) {
NeilBrown84fc4b52011-10-11 16:49:58 +11008232 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008233 seq_printf(seq, "Personalities : ");
8234 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008235 list_for_each_entry(pers, &pers_list, list)
8236 seq_printf(seq, "[%s] ", pers->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008237
8238 spin_unlock(&pers_lock);
8239 seq_printf(seq, "\n");
Kay Sieversf1514632011-07-12 20:48:39 +02008240 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008241 return 0;
8242 }
8243 if (v == (void*)2) {
8244 status_unused(seq);
8245 return 0;
8246 }
8247
NeilBrown36d091f2014-12-15 12:56:58 +11008248 spin_lock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008249 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8250 seq_printf(seq, "%s : %sactive", mdname(mddev),
8251 mddev->pers ? "" : "in");
8252 if (mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08008253 if (mddev->ro==1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008254 seq_printf(seq, " (read-only)");
NeilBrownf91de922005-11-08 21:39:36 -08008255 if (mddev->ro==2)
NeilBrown52720ae2008-03-10 11:43:47 -07008256 seq_printf(seq, " (auto-read-only)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008257 seq_printf(seq, " %s", mddev->pers->name);
8258 }
8259
Andre Nolldd8ac332009-03-31 14:33:13 +11008260 sectors = 0;
NeilBrownf97fcad2014-12-15 12:56:59 +11008261 rcu_read_lock();
8262 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008263 char b[BDEVNAME_SIZE];
8264 seq_printf(seq, " %s[%d]",
8265 bdevname(rdev->bdev,b), rdev->desc_nr);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07008266 if (test_bit(WriteMostly, &rdev->flags))
8267 seq_printf(seq, "(W)");
Shaohua Li9efdca12015-10-12 16:59:50 -07008268 if (test_bit(Journal, &rdev->flags))
8269 seq_printf(seq, "(J)");
NeilBrownb2d444d2005-11-08 21:39:31 -08008270 if (test_bit(Faulty, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271 seq_printf(seq, "(F)");
8272 continue;
NeilBrown2d78f8c2011-12-23 10:17:51 +11008273 }
8274 if (rdev->raid_disk < 0)
NeilBrownb325a322005-09-09 16:24:00 -07008275 seq_printf(seq, "(S)"); /* spare */
NeilBrown2d78f8c2011-12-23 10:17:51 +11008276 if (test_bit(Replacement, &rdev->flags))
8277 seq_printf(seq, "(R)");
Andre Nolldd8ac332009-03-31 14:33:13 +11008278 sectors += rdev->sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008279 }
NeilBrownf97fcad2014-12-15 12:56:59 +11008280 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008281
8282 if (!list_empty(&mddev->disks)) {
8283 if (mddev->pers)
8284 seq_printf(seq, "\n %llu blocks",
Andre Nollf233ea52008-07-21 17:05:22 +10008285 (unsigned long long)
8286 mddev->array_sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008287 else
8288 seq_printf(seq, "\n %llu blocks",
Andre Nolldd8ac332009-03-31 14:33:13 +11008289 (unsigned long long)sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008290 }
NeilBrown1cd6bf12005-09-09 16:24:00 -07008291 if (mddev->persistent) {
8292 if (mddev->major_version != 0 ||
8293 mddev->minor_version != 90) {
8294 seq_printf(seq," super %d.%d",
8295 mddev->major_version,
8296 mddev->minor_version);
8297 }
NeilBrowne6910632008-02-06 01:39:51 -08008298 } else if (mddev->external)
8299 seq_printf(seq, " super external:%s",
8300 mddev->metadata_type);
8301 else
NeilBrown1cd6bf12005-09-09 16:24:00 -07008302 seq_printf(seq, " super non-persistent");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008303
8304 if (mddev->pers) {
NeilBrownd710e132008-10-13 11:55:12 +11008305 mddev->pers->status(seq, mddev);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008306 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008307 if (mddev->pers->sync_request) {
NeilBrownf7851be2015-07-02 17:12:58 +10008308 if (status_resync(seq, mddev))
NeilBrown8e1b39d2005-11-08 21:39:41 -08008309 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008310 }
NeilBrown32a76272005-06-21 17:17:14 -07008311 } else
8312 seq_printf(seq, "\n ");
8313
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07008314 md_bitmap_status(seq, mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008315
8316 seq_printf(seq, "\n");
8317 }
NeilBrown36d091f2014-12-15 12:56:58 +11008318 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008319
Linus Torvalds1da177e2005-04-16 15:20:36 -07008320 return 0;
8321}
8322
Jan Engelhardt110518b2009-05-07 12:49:37 +10008323static const struct seq_operations md_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008324 .start = md_seq_start,
8325 .next = md_seq_next,
8326 .stop = md_seq_stop,
8327 .show = md_seq_show,
8328};
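/*
 * Illustrative note (not part of the driver): the iterator above follows
 * the standard seq_file start/next/stop protocol, using two sentinel
 * cursor values rather than real mddev pointers:
 *
 *	(void *)1   header line ("Personalities : [raid1] ...")
 *	mddev       one array stanza, rendered by md_seq_show()
 *	(void *)2   trailer, rendered by status_unused()
 *
 * md_seq_start() takes a reference on the mddev it returns, and
 * md_seq_next()/md_seq_stop() drop it, so an array cannot be freed while
 * its /proc/mdstat stanza is being formatted.
 */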
8329
8330static int md_seq_open(struct inode *inode, struct file *file)
8331{
Kay Sieversf1514632011-07-12 20:48:39 +02008332 struct seq_file *seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008333 int error;
8334
8335 error = seq_open(file, &md_seq_ops);
NeilBrownd7603b72006-01-06 00:20:30 -08008336 if (error)
Kay Sieversf1514632011-07-12 20:48:39 +02008337 return error;
8338
8339 seq = file->private_data;
8340 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008341 return error;
8342}
8343
NeilBrowne2f23b62014-04-09 14:33:51 +10008344static int md_unloading;
Al Viroafc9a422017-07-03 06:39:46 -04008345static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
NeilBrownd7603b72006-01-06 00:20:30 -08008346{
Kay Sieversf1514632011-07-12 20:48:39 +02008347 struct seq_file *seq = filp->private_data;
Al Viroafc9a422017-07-03 06:39:46 -04008348 __poll_t mask;
NeilBrownd7603b72006-01-06 00:20:30 -08008349
NeilBrowne2f23b62014-04-09 14:33:51 +10008350 if (md_unloading)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008351 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008352 poll_wait(filp, &md_event_waiters, wait);
8353
8354 /* always allow read */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008355 mask = EPOLLIN | EPOLLRDNORM;
NeilBrownd7603b72006-01-06 00:20:30 -08008356
Kay Sieversf1514632011-07-12 20:48:39 +02008357 if (seq->poll_event != atomic_read(&md_event_count))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008358 mask |= EPOLLERR | EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008359 return mask;
8360}
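/*
 * Illustrative sketch (hypothetical userspace consumer, not part of the
 * driver): mdstat_poll() lets a monitor sleep until the event counter
 * changes instead of re-reading /proc/mdstat in a loop.  Roughly:
 *
 *	// needs <poll.h>, <fcntl.h>, <unistd.h>
 *	char buf[4096];
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);		// wakes when md_event_count moves
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));	// parse the fresh snapshot
 *	}
 *
 * The POLLERR | POLLPRI bits seen by userspace correspond to the
 * EPOLLERR | EPOLLPRI mask set above when seq->poll_event no longer
 * matches md_event_count.
 */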
8361
Alexey Dobriyan97a32532020-02-03 17:37:17 -08008362static const struct proc_ops mdstat_proc_ops = {
8363 .proc_open = md_seq_open,
8364 .proc_read = seq_read,
8365 .proc_lseek = seq_lseek,
8366 .proc_release = seq_release,
8367 .proc_poll = mdstat_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008368};
8369
NeilBrown84fc4b52011-10-11 16:49:58 +11008370int register_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008371{
NeilBrown9d487392016-11-02 14:16:49 +11008372 pr_debug("md: %s personality registered for level %d\n",
8373 p->name, p->level);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008374 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008375 list_add_tail(&p->list, &pers_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008376 spin_unlock(&pers_lock);
8377 return 0;
8378}
NeilBrown6c144d32014-09-30 16:15:38 +10008379EXPORT_SYMBOL(register_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008380
NeilBrown84fc4b52011-10-11 16:49:58 +11008381int unregister_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008382{
NeilBrown9d487392016-11-02 14:16:49 +11008383 pr_debug("md: %s personality unregistered\n", p->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008384 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008385 list_del_init(&p->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008386 spin_unlock(&pers_lock);
8387 return 0;
8388}
NeilBrown6c144d32014-09-30 16:15:38 +10008389EXPORT_SYMBOL(unregister_md_personality);
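/*
 * Illustrative sketch (hypothetical personality module, not part of md.c):
 * a RAID personality registers itself from its module init/exit hooks,
 * roughly
 *
 *	static struct md_personality example_personality = {
 *		.name	= "example",
 *		.level	= -1000,	// made-up level number
 *		.owner	= THIS_MODULE,
 *		// .make_request, .run, .free, ... as required
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 *
 * register_md_personality() only links the entry onto pers_list under
 * pers_lock; level/name lookups elsewhere in md.c then find it.
 */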
Linus Torvalds1da177e2005-04-16 15:20:36 -07008390
NeilBrown6022e752015-08-13 12:32:55 +10008391int register_md_cluster_operations(struct md_cluster_operations *ops,
8392 struct module *module)
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008393{
NeilBrown6022e752015-08-13 12:32:55 +10008394 int ret = 0;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008395 spin_lock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008396 if (md_cluster_ops != NULL)
8397 ret = -EALREADY;
8398 else {
8399 md_cluster_ops = ops;
8400 md_cluster_mod = module;
8401 }
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008402 spin_unlock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008403 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008404}
8405EXPORT_SYMBOL(register_md_cluster_operations);
8406
8407int unregister_md_cluster_operations(void)
8408{
8409 spin_lock(&pers_lock);
8410 md_cluster_ops = NULL;
8411 spin_unlock(&pers_lock);
8412 return 0;
8413}
8414EXPORT_SYMBOL(unregister_md_cluster_operations);
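/*
 * Illustrative sketch (assumed shape of the md-cluster module, not part
 * of md.c): the cluster backend supplies an ops table and registers it
 * when it loads, roughly
 *
 *	static struct md_cluster_operations cluster_ops = {
 *		.join		= join,
 *		.leave		= leave,
 *		.resync_start	= resync_start,
 *		// ... remaining callbacks ...
 *	};
 *
 *	static int __init cluster_init(void)
 *	{
 *		return register_md_cluster_operations(&cluster_ops, THIS_MODULE);
 *	}
 *
 * md_setup_cluster() below request_module()s "md-cluster" on demand and
 * pins it with try_module_get() before calling ->join().
 */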
8415
8416int md_setup_cluster(struct mddev *mddev, int nodes)
8417{
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008418 int ret;
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008419 if (!md_cluster_ops)
8420 request_module("md-cluster");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008421 spin_lock(&pers_lock);
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008422 /* ensure module won't be unloaded */
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008423 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
NeilBrown9d487392016-11-02 14:16:49 +11008424 pr_warn("can't find md-cluster module or get its reference.\n");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008425 spin_unlock(&pers_lock);
8426 return -ENOENT;
8427 }
8428 spin_unlock(&pers_lock);
8429
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008430 ret = md_cluster_ops->join(mddev, nodes);
8431 if (!ret)
8432 mddev->safemode_delay = 0;
8433 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008434}
8435
8436void md_cluster_stop(struct mddev *mddev)
8437{
Goldwyn Rodriguesc4ce8672014-03-29 10:20:02 -05008438 if (!md_cluster_ops)
8439 return;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008440 md_cluster_ops->leave(mddev);
8441 module_put(md_cluster_mod);
8442}
8443
NeilBrownfd01b882011-10-11 16:47:53 +11008444static int is_mddev_idle(struct mddev *mddev, int init)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008445{
NeilBrownf72ffdd2014-09-30 14:23:59 +10008446 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008447 int idle;
NeilBrowneea1bf32009-03-31 14:27:02 +11008448 int curr_events;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008449
8450 idle = 1;
NeilBrown4b809912008-07-21 17:05:25 +10008451 rcu_read_lock();
8452 rdev_for_each_rcu(rdev, mddev) {
Christoph Hellwig4245e522020-09-03 07:40:59 +02008453 struct gendisk *disk = rdev->bdev->bd_disk;
Christoph Hellwig8446fe92020-11-24 09:36:54 +01008454 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
NeilBrowneea1bf32009-03-31 14:27:02 +11008455 atomic_read(&disk->sync_io);
NeilBrown713f6ab2007-07-17 04:06:12 -07008456 /* sync IO will cause sync_io to increase before the disk_stats
8457 * as sync_io is counted when a request starts, and
8458 * disk_stats is counted when it completes.
8459 * So resync activity will cause curr_events to be smaller than
8460 * when there was no such activity.
8461 * non-sync IO will cause disk_stats to increase without
8462 * increasing sync_io so curr_events will (eventually)
8463 * be larger than it was before. Once it becomes
8464 * substantially larger, the test below will cause
8465 * the array to appear non-idle, and resync will slow
8466 * down.
8467 * If there is a lot of outstanding resync activity when
8468 * we set last_events to curr_events, then all that activity
8469 * completing might cause the array to appear non-idle
8470 * and resync will be slowed down even though there might
8471 * not have been non-resync activity. This will only
8472 * happen once though. 'last_events' will soon reflect
8473 * the state where there is little or no outstanding
8474 * resync requests, and further resync activity will
8475 * always make curr_events less than last_events.
NeilBrownc0e48522005-11-18 01:11:01 -08008476 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008477 */
NeilBrowneea1bf32009-03-31 14:27:02 +11008478 if (init || curr_events - rdev->last_events > 64) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008479 rdev->last_events = curr_events;
8480 idle = 0;
8481 }
8482 }
NeilBrown4b809912008-07-21 17:05:25 +10008483 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484 return idle;
8485}
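/*
 * Worked example for the accounting above (illustrative numbers only):
 * with 10000 sectors completed in total (part_stat) and 9980 of them
 * issued by resync (sync_io), curr_events is 20.  Further resync I/O
 * moves both counters by the same amount, so curr_events stays near 20
 * and the array still looks idle.  Ordinary filesystem I/O raises only
 * the part_stat side; once curr_events exceeds last_events by more than
 * the 64-sector slack, is_mddev_idle() returns 0 and md_do_sync() backs
 * resync off towards speed_min.
 */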
8486
NeilBrownfd01b882011-10-11 16:47:53 +11008487void md_done_sync(struct mddev *mddev, int blocks, int ok)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008488{
8489 /* another "blocks" (512byte) blocks have been synced */
8490 atomic_sub(blocks, &mddev->recovery_active);
8491 wake_up(&mddev->recovery_wait);
8492 if (!ok) {
NeilBrowndfc70642008-05-23 13:04:39 -07008493 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
majianpeng0a19caa2012-11-19 19:57:34 +08008494 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008495 md_wakeup_thread(mddev->thread);
8496 // stop recovery, signal do_sync ....
8497 }
8498}
NeilBrown6c144d32014-09-30 16:15:38 +10008499EXPORT_SYMBOL(md_done_sync);
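/*
 * Illustrative sketch (hypothetical personality code, not part of md.c):
 * a personality reports each completed chunk of resync/recovery I/O back
 * with something like
 *
 *	static void example_sync_write_done(struct mddev *mddev,
 *					    int nr_sectors, int error)
 *	{
 *		md_done_sync(mddev, nr_sectors, !error);	// 512-byte sectors
 *	}
 *
 * which subtracts nr_sectors from recovery_active (md_do_sync() waits on
 * recovery_wait for that counter to drain) and, on error, sets
 * MD_RECOVERY_INTR/MD_RECOVERY_ERROR so the sync thread stops.
 */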
Linus Torvalds1da177e2005-04-16 15:20:36 -07008500
NeilBrown06d91a52005-06-21 17:17:12 -07008501/* md_write_start(mddev, bi)
8502 * If we need to update some array metadata (e.g. 'active' flag
NeilBrown3d310eb2005-06-21 17:17:26 -07008503 * in superblock) before writing, schedule a superblock update
8504 * and wait for it to complete.
NeilBrowncc27b0c2017-06-05 16:49:39 +10008505 * A return value of 'false' means that the write wasn't recorded
8506 * and cannot proceed as the array is being suspended.
NeilBrown06d91a52005-06-21 17:17:12 -07008507 */
NeilBrowncc27b0c2017-06-05 16:49:39 +10008508bool md_write_start(struct mddev *mddev, struct bio *bi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008509{
Neil Brown0fd62b82008-06-28 08:31:36 +10008510 int did_change = 0;
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008511
NeilBrown06d91a52005-06-21 17:17:12 -07008512 if (bio_data_dir(bi) != WRITE)
NeilBrowncc27b0c2017-06-05 16:49:39 +10008513 return true;
NeilBrown06d91a52005-06-21 17:17:12 -07008514
NeilBrownf91de922005-11-08 21:39:36 -08008515 BUG_ON(mddev->ro == 1);
8516 if (mddev->ro == 2) {
8517 /* need to switch to read/write */
8518 mddev->ro = 0;
8519 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8520 md_wakeup_thread(mddev->thread);
NeilBrown25156192008-03-04 14:29:32 -08008521 md_wakeup_thread(mddev->sync_thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008522 did_change = 1;
NeilBrownf91de922005-11-08 21:39:36 -08008523 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008524 rcu_read_lock();
8525 percpu_ref_get(&mddev->writes_pending);
NeilBrown55cc39f2017-03-15 14:05:14 +11008526 smp_mb(); /* Match smp_mb in set_in_sync() */
NeilBrown31a59e32008-04-30 00:52:30 -07008527 if (mddev->safemode == 1)
8528 mddev->safemode = 0;
NeilBrown4ad23a972017-03-15 14:05:14 +11008529 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
NeilBrown81fe48e2017-08-08 16:56:36 +10008530 if (mddev->in_sync || mddev->sync_checkers) {
NeilBrown85572d72014-12-15 12:56:56 +11008531 spin_lock(&mddev->lock);
NeilBrown3d310eb2005-06-21 17:17:26 -07008532 if (mddev->in_sync) {
8533 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008534 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8535 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown3d310eb2005-06-21 17:17:26 -07008536 md_wakeup_thread(mddev->thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008537 did_change = 1;
NeilBrown3d310eb2005-06-21 17:17:26 -07008538 }
NeilBrown85572d72014-12-15 12:56:56 +11008539 spin_unlock(&mddev->lock);
NeilBrown06d91a52005-06-21 17:17:12 -07008540 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008541 rcu_read_unlock();
Neil Brown0fd62b82008-06-28 08:31:36 +10008542 if (did_change)
NeilBrown00bcb4a2010-06-01 19:37:23 +10008543 sysfs_notify_dirent_safe(mddev->sysfs_state);
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008544 if (!mddev->has_superblocks)
8545 return true;
NeilBrown09a44cc2008-05-23 13:04:36 -07008546 wait_event(mddev->sb_wait,
NeilBrownd47c8ad2017-10-05 16:23:16 +11008547 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8548 mddev->suspended);
NeilBrowncc27b0c2017-06-05 16:49:39 +10008549 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8550 percpu_ref_put(&mddev->writes_pending);
8551 return false;
8552 }
8553 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008554}
NeilBrown6c144d32014-09-30 16:15:38 +10008555EXPORT_SYMBOL(md_write_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008556
NeilBrown49728052017-03-15 14:05:12 +11008557/* md_write_inc can only be called when md_write_start() has
8558 * already been called at least once for the current request.
8559 * It increments the counter and is useful when a single request
8560 * is split into several parts. Each part causes an increment and
8561 * so needs a matching md_write_end().
8562 * Unlike md_write_start(), it is safe to call md_write_inc() inside
8563 * a spinlocked region.
8564 */
8565void md_write_inc(struct mddev *mddev, struct bio *bi)
8566{
8567 if (bio_data_dir(bi) != WRITE)
8568 return;
8569 WARN_ON_ONCE(mddev->in_sync || mddev->ro);
NeilBrown4ad23a972017-03-15 14:05:14 +11008570 percpu_ref_get(&mddev->writes_pending);
NeilBrown49728052017-03-15 14:05:12 +11008571}
8572EXPORT_SYMBOL(md_write_inc);
8573
NeilBrownfd01b882011-10-11 16:47:53 +11008574void md_write_end(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008575{
NeilBrown4ad23a972017-03-15 14:05:14 +11008576 percpu_ref_put(&mddev->writes_pending);
8577
8578 if (mddev->safemode == 2)
8579 md_wakeup_thread(mddev->thread);
8580 else if (mddev->safemode_delay)
8581 /* The roundup() ensures this only performs locking once
8582 * every ->safemode_delay jiffies
8583 */
8584 mod_timer(&mddev->safemode_timer,
8585 roundup(jiffies, mddev->safemode_delay) +
8586 mddev->safemode_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008587}
NeilBrown4ad23a972017-03-15 14:05:14 +11008588
NeilBrown6c144d32014-09-30 16:15:38 +10008589EXPORT_SYMBOL(md_write_end);
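/*
 * Illustrative sketch (hypothetical personality code, not part of md.c):
 * the expected pairing of md_write_start()/md_write_inc()/md_write_end()
 * in a make_request method is roughly
 *
 *	static bool example_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (!md_write_start(mddev, bio))	// returns true for reads
 *			return false;		// array suspended, caller retries
 *
 *		// ... clone/split the bio and submit to member devices;
 *		// each extra split takes md_write_inc(mddev, bio) ...
 *
 *		// when each part of the write completes (often from the
 *		// clone's bi_end_io), drop the matching reference:
 *		if (bio_data_dir(bio) == WRITE)
 *			md_write_end(mddev);
 *		return true;
 *	}
 *
 * md_write_start() can sleep waiting for the superblock to be marked
 * 'active', so it must not be called under a spinlock, while
 * md_write_inc() (per the comment above) is safe in spinlocked regions.
 */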
Linus Torvalds1da177e2005-04-16 15:20:36 -07008590
NeilBrown2a2275d2007-01-26 00:57:11 -08008591/* md_allow_write(mddev)
8592 * Calling this ensures that the array is marked 'active' so that writes
8593 * may proceed without blocking. It is important to call this before
8594 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8595 * Must be called with mddev_lock held.
8596 */
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008597void md_allow_write(struct mddev *mddev)
NeilBrown2a2275d2007-01-26 00:57:11 -08008598{
8599 if (!mddev->pers)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008600 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008601 if (mddev->ro)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008602 return;
Neil Brown1a0fd492008-06-28 08:31:27 +10008603 if (!mddev->pers->sync_request)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008604 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008605
NeilBrown85572d72014-12-15 12:56:56 +11008606 spin_lock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008607 if (mddev->in_sync) {
8608 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008609 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8610 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown2a2275d2007-01-26 00:57:11 -08008611 if (mddev->safemode_delay &&
8612 mddev->safemode == 0)
8613 mddev->safemode = 1;
NeilBrown85572d72014-12-15 12:56:56 +11008614 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008615 md_update_sb(mddev, 0);
NeilBrown00bcb4a2010-06-01 19:37:23 +10008616 sysfs_notify_dirent_safe(mddev->sysfs_state);
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008617 /* wait for the dirty state to be recorded in the metadata */
8618 wait_event(mddev->sb_wait,
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008619 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown2a2275d2007-01-26 00:57:11 -08008620 } else
NeilBrown85572d72014-12-15 12:56:56 +11008621 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008622}
8623EXPORT_SYMBOL_GPL(md_allow_write);
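/*
 * Illustrative sketch (hypothetical caller, not part of md.c): the typical
 * pattern is to call md_allow_write() before a blocking allocation made
 * while reconfig_mutex (mddev_lock) is held, e.g.
 *
 *	// called with mddev_lock() held
 *	md_allow_write(mddev);
 *	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
 *	if (!new_conf)
 *		return -ENOMEM;
 *
 * Otherwise memory reclaim triggered by the allocation may issue writes
 * to this array; those writes would wait for a superblock update marking
 * the array active, and that update cannot run because it needs the very
 * lock the caller is holding.
 */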
8624
Linus Torvalds1da177e2005-04-16 15:20:36 -07008625#define SYNC_MARKS 10
8626#define SYNC_MARK_STEP (3*HZ)
majianpeng54f89342012-10-31 11:59:10 +11008627#define UPDATE_FREQUENCY (5*60*HZ)
Shaohua Li4ed87312012-10-11 13:34:00 +11008628void md_do_sync(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008629{
Shaohua Li4ed87312012-10-11 13:34:00 +11008630 struct mddev *mddev = thread->mddev;
NeilBrownfd01b882011-10-11 16:47:53 +11008631 struct mddev *mddev2;
Yufen Yue5b521e2019-06-14 15:41:07 -07008632 unsigned int currspeed = 0, window;
Xiao Niac7e50a2014-08-07 09:37:41 -04008633 sector_t max_sectors,j, io_sectors, recovery_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008634 unsigned long mark[SYNC_MARKS];
majianpeng54f89342012-10-31 11:59:10 +11008635 unsigned long update_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008636 sector_t mark_cnt[SYNC_MARKS];
8637 int last_mark,m;
8638 struct list_head *tmp;
8639 sector_t last_check;
NeilBrown57afd892005-06-21 17:17:13 -07008640 int skipped = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008641 struct md_rdev *rdev;
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008642 char *desc, *action = NULL;
majianpeng7c2c57c2012-07-03 12:12:26 +10008643 struct blk_plug plug;
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008644 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008645
8646 /* just in case thread restarts... */
Song Liud5d885f2017-11-19 22:17:01 -08008647 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8648 test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008649 return;
NeilBrown3991b312014-05-28 13:39:23 +10008650 if (mddev->ro) {/* never try to sync a read-only array */
8651 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008652 return;
NeilBrown3991b312014-05-28 13:39:23 +10008653 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008654
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008655 if (mddev_is_clustered(mddev)) {
8656 ret = md_cluster_ops->resync_start(mddev);
8657 if (ret)
8658 goto skip;
8659
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04008660 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008661 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8662 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8663 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8664 && ((unsigned long long)mddev->curr_resync_completed
8665 < (unsigned long long)mddev->resync_max_sectors))
8666 goto skip;
8667 }
8668
NeilBrown61df9d92006-10-03 01:15:57 -07008669 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008670 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008671 desc = "data-check";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008672 action = "check";
8673 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008674 desc = "requested-resync";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008675 action = "repair";
8676 } else
NeilBrown61df9d92006-10-03 01:15:57 -07008677 desc = "resync";
8678 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8679 desc = "reshape";
8680 else
8681 desc = "recovery";
8682
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008683 mddev->last_sync_action = action ?: desc;
8684
Linus Torvalds1da177e2005-04-16 15:20:36 -07008685 /* we overload curr_resync somewhat here.
8686 * 0 == not engaged in resync at all
8687 * 2 == checking that there is no conflict with another sync
8688 * 1 == like 2, but have yielded to allow conflicting resync to
Yufen Yue5b521e2019-06-14 15:41:07 -07008689 * commence
Linus Torvalds1da177e2005-04-16 15:20:36 -07008690 * other == active in resync - this many blocks
8691 *
8692 * Before starting a resync we must have set curr_resync to
8693 * 2, and then checked that every "conflicting" array has curr_resync
8694 * less than ours. When we find one that is the same or higher
8695 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
8696 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
8697 * This will mean we have to start checking from the beginning again.
8698 *
8699 */
8700
8701 do {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008702 int mddev2_minor = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008703 mddev->curr_resync = 2;
8704
8705 try_again:
NeilBrown404e4b42009-12-30 15:25:23 +11008706 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008707 goto skip;
NeilBrown29ac4aa2008-02-06 01:39:58 -08008708 for_each_mddev(mddev2, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008709 if (mddev2 == mddev)
8710 continue;
Bernd Schubert90b08712008-05-23 13:04:38 -07008711 if (!mddev->parallel_resync
8712 && mddev2->curr_resync
8713 && match_mddev_units(mddev, mddev2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008714 DEFINE_WAIT(wq);
8715 if (mddev < mddev2 && mddev->curr_resync == 2) {
8716 /* arbitrarily yield */
8717 mddev->curr_resync = 1;
8718 wake_up(&resync_wait);
8719 }
8720 if (mddev > mddev2 && mddev->curr_resync == 1)
8721 /* no need to wait here, we can wait the next
8722 * time 'round when curr_resync == 2
8723 */
8724 continue;
NeilBrown97441972008-09-19 11:49:54 +10008725 /* We need to wait 'interruptible' so as not to
8726 * contribute to the load average, and not to
8727 * be caught by 'softlockup'
8728 */
8729 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
NeilBrownc91abf52013-11-19 12:02:01 +11008730 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown8712e552005-10-26 01:58:58 -07008731 mddev2->curr_resync >= mddev->curr_resync) {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008732 if (mddev2_minor != mddev2->md_minor) {
8733 mddev2_minor = mddev2->md_minor;
NeilBrown9d487392016-11-02 14:16:49 +11008734 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8735 desc, mdname(mddev),
8736 mdname(mddev2));
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008737 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008738 mddev_put(mddev2);
NeilBrown97441972008-09-19 11:49:54 +10008739 if (signal_pending(current))
8740 flush_signals(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008741 schedule();
8742 finish_wait(&resync_wait, &wq);
8743 goto try_again;
8744 }
8745 finish_wait(&resync_wait, &wq);
8746 }
8747 }
8748 } while (mddev->curr_resync < 2);
8749
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008750 j = 0;
NeilBrown9d888832005-11-08 21:39:26 -08008751 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008752 /* resync follows the size requested by the personality,
NeilBrown57afd892005-06-21 17:17:13 -07008753 * which defaults to physical size, but can be virtual size
Linus Torvalds1da177e2005-04-16 15:20:36 -07008754 */
8755 max_sectors = mddev->resync_max_sectors;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11008756 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008757 /* we don't use the checkpoint if there's a bitmap */
Neil Brown5e96ee62008-06-28 08:31:24 +10008758 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8759 j = mddev->resync_min;
8760 else if (!mddev->bitmap)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008761 j = mddev->recovery_cp;
Neil Brown5e96ee62008-06-28 08:31:24 +10008762
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008763 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
NeilBrownc804cde2012-05-21 09:28:33 +10008764 max_sectors = mddev->resync_max_sectors;
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008765 /*
8766 * If the original node aborts reshaping then we continue the
8767 * reshaping, so set j again to avoid restarting the reshape
8768 * from the very beginning
8769 */
8770 if (mddev_is_clustered(mddev) &&
8771 mddev->reshape_position != MaxSector)
8772 j = mddev->reshape_position;
8773 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008774 /* recovery follows the physical size of devices */
Andre Noll58c0fed2009-03-31 14:33:13 +11008775 max_sectors = mddev->dev_sectors;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008776 j = MaxSector;
Dan Williams4e59ca72009-12-12 21:17:06 -07008777 rcu_read_lock();
NeilBrowndafb20f2012-03-19 12:46:39 +11008778 rdev_for_each_rcu(rdev, mddev)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008779 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07008780 !test_bit(Journal, &rdev->flags) &&
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008781 !test_bit(Faulty, &rdev->flags) &&
8782 !test_bit(In_sync, &rdev->flags) &&
8783 rdev->recovery_offset < j)
8784 j = rdev->recovery_offset;
Dan Williams4e59ca72009-12-12 21:17:06 -07008785 rcu_read_unlock();
NeilBrown133d4522014-07-02 12:04:14 +10008786
8787 /* If there is a bitmap, we need to make sure all
8788 * writes that started before we added a spare
8789 * complete before we start doing a recovery.
8790 * Otherwise the write might complete and (via
8791 * bitmap_endwrite) set a bit in the bitmap after the
8792 * recovery has checked that bit and skipped that
8793 * region.
8794 */
8795 if (mddev->bitmap) {
8796 mddev->pers->quiesce(mddev, 1);
8797 mddev->pers->quiesce(mddev, 0);
8798 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008799 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008800
NeilBrown9d487392016-11-02 14:16:49 +11008801 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8802 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
8803 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8804 speed_max(mddev), desc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008805
NeilBrowneea1bf32009-03-31 14:27:02 +11008806 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008807
NeilBrown57afd892005-06-21 17:17:13 -07008808 io_sectors = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008809 for (m = 0; m < SYNC_MARKS; m++) {
8810 mark[m] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008811 mark_cnt[m] = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008812 }
8813 last_mark = 0;
8814 mddev->resync_mark = mark[last_mark];
8815 mddev->resync_mark_cnt = mark_cnt[last_mark];
8816
8817 /*
8818 * Tune reconstruction:
8819 */
Yufen Yue5b521e2019-06-14 15:41:07 -07008820 window = 32 * (PAGE_SIZE / 512);
NeilBrown9d487392016-11-02 14:16:49 +11008821 pr_debug("md: using %dk window, over a total of %lluk.\n",
8822 window/2, (unsigned long long)max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008823
8824 atomic_set(&mddev->recovery_active, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008825 last_check = 0;
8826
8827 if (j>2) {
NeilBrown9d487392016-11-02 14:16:49 +11008828 pr_debug("md: resuming %s of %s from checkpoint.\n",
8829 desc, mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008830 mddev->curr_resync = j;
NeilBrown72f36d52012-10-11 14:25:57 +11008831 } else
8832 mddev->curr_resync = 3; /* no longer delayed */
NeilBrown75d3da42011-01-14 09:14:34 +11008833 mddev->curr_resync_completed = j;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008834 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown72f36d52012-10-11 14:25:57 +11008835 md_new_event(mddev);
majianpeng54f89342012-10-31 11:59:10 +11008836 update_time = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008837
majianpeng7c2c57c2012-07-03 12:12:26 +10008838 blk_start_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008839 while (j < max_sectors) {
NeilBrown57afd892005-06-21 17:17:13 -07008840 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008841
NeilBrown57afd892005-06-21 17:17:13 -07008842 skipped = 0;
NeilBrown97e4f422009-03-31 14:33:13 +11008843
NeilBrown7a91ee12009-05-26 12:57:21 +10008844 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8845 ((mddev->curr_resync > mddev->curr_resync_completed &&
8846 (mddev->curr_resync - mddev->curr_resync_completed)
8847 > (max_sectors >> 4)) ||
majianpeng54f89342012-10-31 11:59:10 +11008848 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
NeilBrown7a91ee12009-05-26 12:57:21 +10008849 (j - mddev->curr_resync_completed)*2
NeilBrownc5e19d92015-07-17 12:06:02 +10008850 >= mddev->resync_max - mddev->curr_resync_completed ||
8851 mddev->curr_resync_completed > mddev->resync_max
NeilBrown7a91ee12009-05-26 12:57:21 +10008852 )) {
NeilBrown97e4f422009-03-31 14:33:13 +11008853 /* time to update curr_resync_completed */
NeilBrown97e4f422009-03-31 14:33:13 +11008854 wait_event(mddev->recovery_wait,
8855 atomic_read(&mddev->recovery_active) == 0);
NeilBrown75d3da42011-01-14 09:14:34 +11008856 mddev->curr_resync_completed = j;
kernelmail35d78c62012-10-31 11:59:10 +11008857 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8858 j > mddev->recovery_cp)
8859 mddev->recovery_cp = j;
majianpeng54f89342012-10-31 11:59:10 +11008860 update_time = jiffies;
Shaohua Li29530792016-12-08 15:48:19 -08008861 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
Junxiao Bie1a86db2020-07-14 16:10:26 -07008862 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown97e4f422009-03-31 14:33:13 +11008863 }
NeilBrownacb180b2009-04-14 16:28:34 +10008864
NeilBrownc91abf52013-11-19 12:02:01 +11008865 while (j >= mddev->resync_max &&
8866 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
NeilBrowne62e58a2009-07-01 13:15:35 +10008867 /* As this condition is controlled by user-space,
8868 * we can block indefinitely, so use '_interruptible'
8869 * to avoid triggering warnings.
8870 */
8871 flush_signals(current); /* just in case */
8872 wait_event_interruptible(mddev->recovery_wait,
8873 mddev->resync_max > j
NeilBrownc91abf52013-11-19 12:02:01 +11008874 || test_bit(MD_RECOVERY_INTR,
8875 &mddev->recovery));
NeilBrowne62e58a2009-07-01 13:15:35 +10008876 }
NeilBrownacb180b2009-04-14 16:28:34 +10008877
NeilBrownc91abf52013-11-19 12:02:01 +11008878 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8879 break;
NeilBrownacb180b2009-04-14 16:28:34 +10008880
NeilBrown09314792015-02-19 16:04:40 +11008881 sectors = mddev->pers->sync_request(mddev, j, &skipped);
NeilBrown57afd892005-06-21 17:17:13 -07008882 if (sectors == 0) {
NeilBrowndfc70642008-05-23 13:04:39 -07008883 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownc91abf52013-11-19 12:02:01 +11008884 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008885 }
NeilBrown57afd892005-06-21 17:17:13 -07008886
8887 if (!skipped) { /* actual IO requested */
8888 io_sectors += sectors;
8889 atomic_add(sectors, &mddev->recovery_active);
8890 }
8891
NeilBrowne875ece2011-07-28 11:39:24 +10008892 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8893 break;
8894
Linus Torvalds1da177e2005-04-16 15:20:36 -07008895 j += sectors;
NeilBrown5ed1df22015-07-24 13:27:08 +10008896 if (j > max_sectors)
8897 /* when skipping, extra large numbers can be returned. */
8898 j = max_sectors;
NeilBrown72f36d52012-10-11 14:25:57 +11008899 if (j > 2)
8900 mddev->curr_resync = j;
NeilBrownff4e8d92006-07-10 04:44:16 -07008901 mddev->curr_mark_cnt = io_sectors;
NeilBrownd7603b72006-01-06 00:20:30 -08008902 if (last_check == 0)
NeilBrowne875ece2011-07-28 11:39:24 +10008903 /* this is the earliest that rebuild will be
NeilBrownd7603b72006-01-06 00:20:30 -08008904 * visible in /proc/mdstat
8905 */
8906 md_new_event(mddev);
NeilBrown57afd892005-06-21 17:17:13 -07008907
8908 if (last_check + window > io_sectors || j == max_sectors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008909 continue;
8910
NeilBrown57afd892005-06-21 17:17:13 -07008911 last_check = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008912 repeat:
8913 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8914 /* step marks */
8915 int next = (last_mark+1) % SYNC_MARKS;
8916
8917 mddev->resync_mark = mark[next];
8918 mddev->resync_mark_cnt = mark_cnt[next];
8919 mark[next] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008920 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008921 last_mark = next;
8922 }
8923
NeilBrownc91abf52013-11-19 12:02:01 +11008924 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8925 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008926
8927 /*
8928 * this loop exits only if we are slower than
8929 * the 'hard' speed limit, or the system was IO-idle for
8930 * a jiffy.
8931 * the system might be non-idle CPU-wise, but we only care
8932 * about not overloading the IO subsystem. (things like an
8933 * e2fsck being done on the RAID array should execute fast)
8934 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008935 cond_resched();
8936
Xiao Niac7e50a2014-08-07 09:37:41 -04008937 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8938 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
NeilBrown57afd892005-06-21 17:17:13 -07008939 /((jiffies-mddev->resync_mark)/HZ +1) +1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008940
NeilBrown88202a02006-01-06 00:21:36 -08008941 if (currspeed > speed_min(mddev)) {
NeilBrownac8fa412015-02-19 16:55:00 +11008942 if (currspeed > speed_max(mddev)) {
NeilBrownc0e48522005-11-18 01:11:01 -08008943 msleep(500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008944 goto repeat;
8945 }
NeilBrownac8fa412015-02-19 16:55:00 +11008946 if (!is_mddev_idle(mddev, 0)) {
8947 /*
8948 * Give other IO more of a chance.
8949 * The faster the devices, the less we wait.
8950 */
8951 wait_event(mddev->recovery_wait,
8952 !atomic_read(&mddev->recovery_active));
8953 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008954 }
8955 }
NeilBrown9d487392016-11-02 14:16:49 +11008956 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8957 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8958 ? "interrupted" : "done");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008959 /*
8960 * this also signals 'finished resyncing' to md_stop
8961 */
majianpeng7c2c57c2012-07-03 12:12:26 +10008962 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008963 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8964
NeilBrown5ed1df22015-07-24 13:27:08 +10008965 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8966 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008967 mddev->curr_resync > 3) {
NeilBrown5ed1df22015-07-24 13:27:08 +10008968 mddev->curr_resync_completed = mddev->curr_resync;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008969 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown5ed1df22015-07-24 13:27:08 +10008970 }
NeilBrown09314792015-02-19 16:04:40 +11008971 mddev->pers->sync_request(mddev, max_sectors, &skipped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008972
NeilBrowndfc70642008-05-23 13:04:39 -07008973 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008974 mddev->curr_resync > 3) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008975 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8976 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8977 if (mddev->curr_resync >= mddev->recovery_cp) {
NeilBrown9d487392016-11-02 14:16:49 +11008978 pr_debug("md: checkpointing %s of %s.\n",
8979 desc, mdname(mddev));
majianpeng0a19caa2012-11-19 19:57:34 +08008980 if (test_bit(MD_RECOVERY_ERROR,
8981 &mddev->recovery))
8982 mddev->recovery_cp =
8983 mddev->curr_resync_completed;
8984 else
8985 mddev->recovery_cp =
8986 mddev->curr_resync;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008987 }
8988 } else
8989 mddev->recovery_cp = MaxSector;
8990 } else {
8991 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8992 mddev->curr_resync = MaxSector;
NeilBrowndb0505d2017-10-17 16:18:36 +11008993 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8994 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
8995 rcu_read_lock();
8996 rdev_for_each_rcu(rdev, mddev)
8997 if (rdev->raid_disk >= 0 &&
8998 mddev->delta_disks >= 0 &&
8999 !test_bit(Journal, &rdev->flags) &&
9000 !test_bit(Faulty, &rdev->flags) &&
9001 !test_bit(In_sync, &rdev->flags) &&
9002 rdev->recovery_offset < mddev->curr_resync)
9003 rdev->recovery_offset = mddev->curr_resync;
9004 rcu_read_unlock();
9005 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07009006 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009007 }
NeilBrowndb91ff52012-02-07 12:01:51 +11009008 skip:
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009009 /* set CHANGE_PENDING here since maybe another update is needed,
9010 * so other nodes are informed. It should be harmless for normal
9011 * raid */
Shaohua Li29530792016-12-08 15:48:19 -08009012 set_mask_bits(&mddev->sb_flags, 0,
9013 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009014
BingJing Chang88763912018-02-22 13:34:46 +08009015 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9016 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9017 mddev->delta_disks > 0 &&
9018 mddev->pers->finish_reshape &&
9019 mddev->pers->size &&
9020 mddev->queue) {
9021 mddev_lock_nointr(mddev);
9022 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9023 mddev_unlock(mddev);
Christoph Hellwig2c247c52020-11-16 15:57:11 +01009024 if (!mddev_is_clustered(mddev))
9025 set_capacity_and_notify(mddev->gendisk,
9026 mddev->array_sectors);
BingJing Chang88763912018-02-22 13:34:46 +08009027 }
9028
NeilBrown23da4222014-12-15 12:57:01 +11009029 spin_lock(&mddev->lock);
NeilBrownc07b70a2009-12-14 12:49:48 +11009030 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9031 /* We completed so min/max setting can be forgotten if used. */
9032 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9033 mddev->resync_min = 0;
9034 mddev->resync_max = MaxSector;
9035 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9036 mddev->resync_min = mddev->curr_resync_completed;
NeilBrownf7851be2015-07-02 17:12:58 +10009037 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009038 mddev->curr_resync = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009039 spin_unlock(&mddev->lock);
9040
Linus Torvalds1da177e2005-04-16 15:20:36 -07009041 wake_up(&resync_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009042 md_wakeup_thread(mddev->thread);
NeilBrownc6207272008-02-06 01:39:52 -08009043 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009044}
NeilBrown29269552006-03-27 01:18:10 -08009045EXPORT_SYMBOL_GPL(md_do_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009046
NeilBrown746d3202013-04-24 11:42:41 +10009047static int remove_and_add_spares(struct mddev *mddev,
9048 struct md_rdev *this)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009049{
NeilBrown3cb03002011-10-11 16:45:26 +11009050 struct md_rdev *rdev;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009051 int spares = 0;
NeilBrownf2a371c2012-01-09 00:46:41 +11009052 int removed = 0;
NeilBrownd787be42016-06-02 16:19:53 +10009053 bool remove_some = false;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009054
NeilBrown39772f02018-02-03 09:19:30 +11009055 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9056 /* Mustn't remove devices when resync thread is running */
9057 return 0;
9058
NeilBrownd787be42016-06-02 16:19:53 +10009059 rdev_for_each(rdev, mddev) {
NeilBrown746d3202013-04-24 11:42:41 +10009060 if ((this == NULL || rdev == this) &&
9061 rdev->raid_disk >= 0 &&
Dan Williams6bfe0b42008-04-30 00:52:32 -07009062 !test_bit(Blocked, &rdev->flags) &&
NeilBrownd787be42016-06-02 16:19:53 +10009063 test_bit(Faulty, &rdev->flags) &&
9064 atomic_read(&rdev->nr_pending)==0) {
9065 /* Faulty non-Blocked devices with nr_pending == 0
9066 * never get nr_pending incremented,
9067 * never get Faulty cleared, and never get Blocked set.
9068 * So we can synchronize_rcu now rather than once per device
9069 */
9070 remove_some = true;
9071 set_bit(RemoveSynchronized, &rdev->flags);
9072 }
9073 }
9074
9075 if (remove_some)
9076 synchronize_rcu();
9077 rdev_for_each(rdev, mddev) {
9078 if ((this == NULL || rdev == this) &&
9079 rdev->raid_disk >= 0 &&
9080 !test_bit(Blocked, &rdev->flags) &&
9081 ((test_bit(RemoveSynchronized, &rdev->flags) ||
Shaohua Lif2076e72015-10-08 21:54:12 -07009082 (!test_bit(In_sync, &rdev->flags) &&
9083 !test_bit(Journal, &rdev->flags))) &&
NeilBrownd787be42016-06-02 16:19:53 +10009084 atomic_read(&rdev->nr_pending)==0)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009085 if (mddev->pers->hot_remove_disk(
NeilBrownb8321b62011-12-23 10:17:51 +11009086 mddev, rdev) == 0) {
Namhyung Kim36fad852011-07-27 11:00:36 +10009087 sysfs_unlink_rdev(mddev, rdev);
NeilBrown011abdc2018-04-26 14:46:29 +10009088 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009089 rdev->raid_disk = -1;
NeilBrownf2a371c2012-01-09 00:46:41 +11009090 removed++;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009091 }
9092 }
NeilBrownd787be42016-06-02 16:19:53 +10009093 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9094 clear_bit(RemoveSynchronized, &rdev->flags);
9095 }
9096
Jonathan Brassow90584fc2013-03-07 16:24:26 -06009097 if (removed && mddev->kobj.sd)
Junxiao Bie1a86db2020-07-14 16:10:26 -07009098 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009099
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009100 if (this && removed)
NeilBrown746d3202013-04-24 11:42:41 +10009101 goto no_add;
9102
NeilBrowndafb20f2012-03-19 12:46:39 +11009103 rdev_for_each(rdev, mddev) {
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009104 if (this && this != rdev)
9105 continue;
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009106 if (test_bit(Candidate, &rdev->flags))
9107 continue;
NeilBrown7bfec5f2011-12-23 10:17:53 +11009108 if (rdev->raid_disk >= 0 &&
9109 !test_bit(In_sync, &rdev->flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07009110 !test_bit(Journal, &rdev->flags) &&
NeilBrown7bfec5f2011-12-23 10:17:53 +11009111 !test_bit(Faulty, &rdev->flags))
9112 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009113 if (rdev->raid_disk >= 0)
9114 continue;
9115 if (test_bit(Faulty, &rdev->flags))
9116 continue;
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009117 if (!test_bit(Journal, &rdev->flags)) {
9118 if (mddev->ro &&
9119 ! (rdev->saved_raid_disk >= 0 &&
9120 !test_bit(Bitmap_sync, &rdev->flags)))
9121 continue;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009122
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009123 rdev->recovery_offset = 0;
9124 }
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02009125 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09009126 /* failure here is OK */
9127 sysfs_link_rdev(mddev, rdev);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009128 if (!test_bit(Journal, &rdev->flags))
9129 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009130 md_new_event(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08009131 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrowndfc70642008-05-23 13:04:39 -07009132 }
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009133 }
NeilBrown746d3202013-04-24 11:42:41 +10009134no_add:
NeilBrown6dafab62012-09-19 12:54:22 +10009135 if (removed)
Shaohua Li29530792016-12-08 15:48:19 -08009136 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009137 return spares;
9138}
NeilBrown7ebc0be2011-01-14 09:14:33 +11009139
NeilBrownac05f252014-09-30 08:10:42 +10009140static void md_start_sync(struct work_struct *ws)
9141{
9142 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009143
NeilBrownac05f252014-09-30 08:10:42 +10009144 mddev->sync_thread = md_register_thread(md_do_sync,
9145 mddev,
9146 "resync");
9147 if (!mddev->sync_thread) {
NeilBrown9d487392016-11-02 14:16:49 +11009148 pr_warn("%s: could not start resync thread...\n",
9149 mdname(mddev));
NeilBrownac05f252014-09-30 08:10:42 +10009150 /* leave the spares where they are, it shouldn't hurt */
9151 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9152 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9153 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9154 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9155 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009156 wake_up(&resync_wait);
NeilBrownac05f252014-09-30 08:10:42 +10009157 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9158 &mddev->recovery))
9159 if (mddev->sysfs_action)
9160 sysfs_notify_dirent_safe(mddev->sysfs_action);
9161 } else
9162 md_wakeup_thread(mddev->sync_thread);
9163 sysfs_notify_dirent_safe(mddev->sysfs_action);
9164 md_new_event(mddev);
9165}
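/*
 * Illustrative note (not part of the driver): md_start_sync() runs from
 * md_misc_wq; md_check_recovery() below hands it off with
 *
 *	INIT_WORK(&mddev->del_work, md_start_sync);
 *	queue_work(md_misc_wq, &mddev->del_work);
 *
 * so md_register_thread(), which can sleep, happens in workqueue context,
 * and on failure the MD_RECOVERY_* bits optimistically set by the caller
 * are cleared again here.
 */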
9166
Linus Torvalds1da177e2005-04-16 15:20:36 -07009167/*
9168 * This routine is regularly called by all per-raid-array threads to
9169 * deal with generic issues like resync and super-block update.
9170 * Raid personalities that don't have a thread (linear/raid0) do not
9171 * need this as they never do any recovery or update the superblock.
9172 *
9173 * It does not do any resync itself, but rather "forks" off other threads
9174 * to do that as needed.
9175 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9176 * "->recovery" and create a thread at ->sync_thread.
NeilBrowndfc70642008-05-23 13:04:39 -07009177 * When the thread finishes it sets MD_RECOVERY_DONE
Linus Torvalds1da177e2005-04-16 15:20:36 -07009178 * and wakes up this thread which will reap the thread and finish up.
9179 * This thread also removes any faulty devices (with nr_pending == 0).
9180 *
9181 * The overall approach is:
9182 * 1/ if the superblock needs updating, update it.
9183 * 2/ If a recovery thread is running, don't do anything else.
9184 * 3/ If recovery has finished, clean up, possibly marking spares active.
9185 * 4/ If there are any faulty devices, remove them.
9186 * 5/ If array is degraded, try to add spare devices
9187 * 6/ If array has spares or is not in-sync, start a resync thread.
9188 */
NeilBrownfd01b882011-10-11 16:47:53 +11009189void md_check_recovery(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009190{
NeilBrown059421e2018-10-03 15:04:41 +10009191 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9192 /* Write superblock - thread that called mddev_suspend()
9193 * holds reconfig_mutex for us.
9194 */
9195 set_bit(MD_UPDATING_SB, &mddev->flags);
9196 smp_mb__after_atomic();
9197 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9198 md_update_sb(mddev, 0);
9199 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9200 wake_up(&mddev->sb_wait);
9201 }
9202
Jonathan Brassow68866e42011-06-08 15:10:08 +10009203 if (mddev->suspended)
9204 return;
9205
NeilBrown5f404022005-06-21 17:17:16 -07009206 if (mddev->bitmap)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009207 md_bitmap_daemon_work(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009208
NeilBrownfca4d842005-06-21 17:17:11 -07009209 if (signal_pending(current)) {
NeilBrown31a59e32008-04-30 00:52:30 -07009210 if (mddev->pers->sync_request && !mddev->external) {
NeilBrown9d487392016-11-02 14:16:49 +11009211 pr_debug("md: %s in immediate safe mode\n",
9212 mdname(mddev));
NeilBrownfca4d842005-06-21 17:17:11 -07009213 mddev->safemode = 2;
9214 }
9215 flush_signals(current);
9216 }
9217
NeilBrownc89a8ee2008-08-05 15:54:13 +10009218 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9219 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009220 if ( ! (
Shaohua Li29530792016-12-08 15:48:19 -08009221 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07009222 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
NeilBrownfca4d842005-06-21 17:17:11 -07009223 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
NeilBrown31a59e32008-04-30 00:52:30 -07009224 (mddev->external == 0 && mddev->safemode == 1) ||
NeilBrown4ad23a972017-03-15 14:05:14 +11009225 (mddev->safemode == 2
NeilBrownfca4d842005-06-21 17:17:11 -07009226 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009227 ))
9228 return;
NeilBrownfca4d842005-06-21 17:17:11 -07009229
NeilBrowndf5b89b2006-03-27 01:18:20 -08009230 if (mddev_trylock(mddev)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009231 int spares = 0;
NeilBrown480523f2019-08-20 10:21:09 +10009232 bool try_set_sync = mddev->safemode != 0;
NeilBrownfca4d842005-06-21 17:17:11 -07009233
Shaohua Liafc1f552017-08-11 20:34:45 -07009234 if (!mddev->external && mddev->safemode == 1)
NeilBrown33182d12017-08-08 16:56:36 +10009235 mddev->safemode = 0;
9236
NeilBrownc89a8ee2008-08-05 15:54:13 +10009237 if (mddev->ro) {
Neil Brownab16bfc2015-06-17 12:31:46 +10009238 struct md_rdev *rdev;
9239 if (!mddev->external && mddev->in_sync)
9240 /* 'Blocked' flag not needed as failed devices
9241 * will be recorded if array switched to read/write.
9242 * Leaving it set will prevent the device
9243 * from being removed.
9244 */
9245 rdev_for_each(rdev, mddev)
9246 clear_bit(Blocked, &rdev->flags);
NeilBrown7ceb17e2013-04-24 11:42:42 +10009247 /* On a read-only array we can:
9248 * - remove failed devices
9249 * - add already-in_sync devices if the array itself
9250 * is in-sync.
9251 * As we only add devices that are already in-sync,
9252 * we can activate the spares immediately.
NeilBrownc89a8ee2008-08-05 15:54:13 +10009253 */
NeilBrown7ceb17e2013-04-24 11:42:42 +10009254 remove_and_add_spares(mddev, NULL);
NeilBrown8313b8e2013-12-12 10:13:33 +11009255 /* There is no thread, but we need to call
9256 * ->spare_active and clear saved_raid_disk
9257 */
NeilBrown2ac295a2014-05-29 11:40:03 +10009258 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009259 md_reap_sync_thread(mddev);
NeilBrowna4a3d262015-07-17 11:57:30 +10009260 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009261 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Shaohua Li29530792016-12-08 15:48:19 -08009262 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrownc89a8ee2008-08-05 15:54:13 +10009263 goto unlock;
9264 }
9265
Guoqing Jiang659b2542015-12-21 10:50:59 +11009266 if (mddev_is_clustered(mddev)) {
9267 struct md_rdev *rdev;
9268 /* kick the device if another node issued a
9269 * remove disk.
9270 */
9271 rdev_for_each(rdev, mddev) {
9272 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9273 rdev->raid_disk < 0)
9274 md_kick_rdev_from_array(rdev);
9275 }
9276 }
9277
NeilBrown480523f2019-08-20 10:21:09 +10009278 if (try_set_sync && !mddev->external && !mddev->in_sync) {
NeilBrown85572d72014-12-15 12:56:56 +11009279 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11009280 set_in_sync(mddev);
NeilBrown85572d72014-12-15 12:56:56 +11009281 spin_unlock(&mddev->lock);
NeilBrownfca4d842005-06-21 17:17:11 -07009282 }
NeilBrownfca4d842005-06-21 17:17:11 -07009283
Shaohua Li29530792016-12-08 15:48:19 -08009284 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07009285 md_update_sb(mddev, 0);
NeilBrown06d91a52005-06-21 17:17:12 -07009286
Linus Torvalds1da177e2005-04-16 15:20:36 -07009287 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9288 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9289 /* resync/recovery still happening */
9290 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9291 goto unlock;
9292 }
9293 if (mddev->sync_thread) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009294 md_reap_sync_thread(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009295 goto unlock;
9296 }
Neil Brown72a23c22008-06-28 08:31:41 +10009297 /* Set RUNNING before clearing NEEDED to avoid
9298 * any transients in the value of "sync_action".
9299 */
NeilBrown72f36d52012-10-11 14:25:57 +11009300 mddev->curr_resync_completed = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009301 spin_lock(&mddev->lock);
Neil Brown72a23c22008-06-28 08:31:41 +10009302 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrown23da4222014-12-15 12:57:01 +11009303 spin_unlock(&mddev->lock);
NeilBrown24dd4692005-11-08 21:39:26 -08009304 /* Clear some bits that don't mean anything, but
9305 * might be left set
9306 */
NeilBrown24dd4692005-11-08 21:39:26 -08009307 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9308 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009309
NeilBrowned209582012-04-24 10:23:14 +10009310 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9311 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrownac05f252014-09-30 08:10:42 +10009312 goto not_running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009313 /* no recovery is running.
9314 * remove any failed drives, then
9315 * add spares if possible.
NeilBrown72f36d52012-10-11 14:25:57 +11009316 * Spares are also removed and re-added, to allow
Linus Torvalds1da177e2005-04-16 15:20:36 -07009317 * the personality to fail the re-add.
9318 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009319
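		/*
		 * Decide what to start, in priority order: continue a pending
		 * reshape; otherwise recover onto any spares that
		 * remove_and_add_spares() found; otherwise resync when
		 * recovery_cp shows the array is not fully synced; otherwise
		 * only proceed if a sync/check was explicitly requested.
		 */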
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009320 if (mddev->reshape_position != MaxSector) {
NeilBrown50ac1682009-06-18 08:47:55 +10009321 if (mddev->pers->check_reshape == NULL ||
9322 mddev->pers->check_reshape(mddev) != 0)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009323 /* Cannot proceed */
NeilBrownac05f252014-09-30 08:10:42 +10009324 goto not_running;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009325 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009326 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown746d3202013-04-24 11:42:41 +10009327 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
NeilBrown24dd4692005-11-08 21:39:26 -08009328 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9329 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Dan Williams56ac36d2008-08-07 10:02:47 -07009330 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009331 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009332 } else if (mddev->recovery_cp < MaxSector) {
9333 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009334 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009335 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9336 /* nothing to be done ... */
NeilBrownac05f252014-09-30 08:10:42 +10009337 goto not_running;
NeilBrown24dd4692005-11-08 21:39:26 -08009338
Linus Torvalds1da177e2005-04-16 15:20:36 -07009339 if (mddev->pers->sync_request) {
NeilBrownef99bf42012-05-22 13:55:08 +10009340 if (spares) {
NeilBrowna654b9d82005-06-21 17:17:27 -07009341 /* We are adding a device or devices to an array
9342 * which has the bitmap stored on all devices.
9343 * So make sure all bitmap pages get written
9344 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009345 md_bitmap_write_all(mddev->bitmap);
NeilBrowna654b9d82005-06-21 17:17:27 -07009346 }
NeilBrownac05f252014-09-30 08:10:42 +10009347 INIT_WORK(&mddev->del_work, md_start_sync);
9348 queue_work(md_misc_wq, &mddev->del_work);
9349 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009350 }
NeilBrownac05f252014-09-30 08:10:42 +10009351 not_running:
Neil Brown72a23c22008-06-28 08:31:41 +10009352 if (!mddev->sync_thread) {
9353 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009354 wake_up(&resync_wait);
Neil Brown72a23c22008-06-28 08:31:41 +10009355 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9356 &mddev->recovery))
NeilBrown0c3573f2009-01-09 08:31:05 +11009357 if (mddev->sysfs_action)
NeilBrown00bcb4a2010-06-01 19:37:23 +10009358 sysfs_notify_dirent_safe(mddev->sysfs_action);
Neil Brown72a23c22008-06-28 08:31:41 +10009359 }
NeilBrownac05f252014-09-30 08:10:42 +10009360 unlock:
9361 wake_up(&mddev->sb_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009362 mddev_unlock(mddev);
9363 }
9364}
NeilBrown6c144d32014-09-30 16:15:38 +10009365EXPORT_SYMBOL(md_check_recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009366
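/*
 * Collect the result of a finished resync/recovery/reshape thread:
 * activate spares on success, let the personality finish a reshape,
 * write the superblocks out and clear the recovery state bits.
 */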
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009367void md_reap_sync_thread(struct mddev *mddev)
9368{
9369 struct md_rdev *rdev;
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009370 sector_t old_dev_sectors = mddev->dev_sectors;
9371 bool is_reshaped = false;
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009372
9373 /* resync has finished, collect result */
9374 md_unregister_thread(&mddev->sync_thread);
9375 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
Guoqing Jiang0d8ed0e92019-07-24 11:09:21 +02009376 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9377 mddev->degraded != mddev->raid_disks) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009378 /* success...*/
9379 /* activate any spares */
9380 if (mddev->pers->spare_active(mddev)) {
Junxiao Bie1a86db2020-07-14 16:10:26 -07009381 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Shaohua Li29530792016-12-08 15:48:19 -08009382 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009383 }
9384 }
9385 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009386 mddev->pers->finish_reshape) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009387 mddev->pers->finish_reshape(mddev);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009388 if (mddev_is_clustered(mddev))
9389 is_reshaped = true;
9390 }
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009391
9392 /* If array is no-longer degraded, then any saved_raid_disk
NeilBrownf4667222013-12-09 12:04:56 +11009393 * information must be scrapped.
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009394 */
NeilBrownf4667222013-12-09 12:04:56 +11009395 if (!mddev->degraded)
9396 rdev_for_each(rdev, mddev)
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009397 rdev->saved_raid_disk = -1;
9398
9399 md_update_sb(mddev, 1);
Shaohua Li29530792016-12-08 15:48:19 -08009400 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009401 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9402 * clustered raid */
9403 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9404 md_cluster_ops->resync_finish(mddev);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009405 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownea358cd2015-06-12 20:05:04 +10009406 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009407 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9408 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9409 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9410 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009411 /*
9412 * We call md_cluster_ops->update_size here because sync_size could
9413 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9414	 * so it is time to update the size across the cluster.
9415 */
9416 if (mddev_is_clustered(mddev) && is_reshaped
9417 && !test_bit(MD_CLOSING, &mddev->flags))
9418 md_cluster_ops->update_size(mddev, old_dev_sectors);
NeilBrownf851b602014-12-11 10:02:10 +11009419 wake_up(&resync_wait);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009420 /* flag recovery needed just to double check */
9421 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9422 sysfs_notify_dirent_safe(mddev->sysfs_action);
9423 md_new_event(mddev);
9424 if (mddev->event_work.func)
9425 queue_work(md_misc_wq, &mddev->event_work);
9426}
NeilBrown6c144d32014-09-30 16:15:38 +10009427EXPORT_SYMBOL(md_reap_sync_thread);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009428
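/*
 * Wait (for up to about five seconds) for a device to leave the
 * Blocked/BlockedBadBlocks state, then drop the pending reference
 * taken by the caller.
 */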
NeilBrownfd01b882011-10-11 16:47:53 +11009429void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
Dan Williams6bfe0b42008-04-30 00:52:32 -07009430{
NeilBrown00bcb4a2010-06-01 19:37:23 +10009431 sysfs_notify_dirent_safe(rdev->sysfs_state);
Dan Williams6bfe0b42008-04-30 00:52:32 -07009432 wait_event_timeout(rdev->blocked_wait,
NeilBrownde393cd2011-07-28 11:31:48 +10009433 !test_bit(Blocked, &rdev->flags) &&
9434 !test_bit(BlockedBadBlocks, &rdev->flags),
Dan Williams6bfe0b42008-04-30 00:52:32 -07009435 msecs_to_jiffies(5000));
9436 rdev_dec_pending(rdev, mddev);
9437}
9438EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9439
NeilBrownc6563a82012-05-21 09:27:00 +10009440void md_finish_reshape(struct mddev *mddev)
9441{
9442	/* called by the personality module when a reshape completes. */
9443 struct md_rdev *rdev;
9444
9445 rdev_for_each(rdev, mddev) {
9446 if (rdev->data_offset > rdev->new_data_offset)
9447 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9448 else
9449 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9450 rdev->data_offset = rdev->new_data_offset;
9451 }
9452}
9453EXPORT_SYMBOL(md_finish_reshape);
NeilBrown2230dfe2011-07-28 11:31:46 +10009454
Vishal Vermafc974ee2015-12-24 19:20:34 -07009455/* Bad block management */
NeilBrown2230dfe2011-07-28 11:31:46 +10009456
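/*
 * Record a range of bad sectors on a device.  The sector 's' is given
 * relative to the data area and is translated to a device-absolute
 * offset (using new_data_offset while a reshape is moving the data)
 * before being stored.
 */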
Vishal Vermafc974ee2015-12-24 19:20:34 -07009457/* Returns 1 on success, 0 on failure */
NeilBrown3cb03002011-10-11 16:45:26 +11009458int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
NeilBrownc6563a82012-05-21 09:27:00 +10009459 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009460{
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04009461 struct mddev *mddev = rdev->mddev;
NeilBrownc6563a82012-05-21 09:27:00 +10009462 int rv;
9463 if (is_new)
9464 s += rdev->new_data_offset;
9465 else
9466 s += rdev->data_offset;
Vishal Vermafc974ee2015-12-24 19:20:34 -07009467 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9468 if (rv == 0) {
NeilBrown2230dfe2011-07-28 11:31:46 +10009469 /* Make sure they get written out promptly */
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009470 if (test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009471 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
NeilBrown8bd2f0a2011-12-08 16:26:08 +11009472 sysfs_notify_dirent_safe(rdev->sysfs_state);
Shaohua Li29530792016-12-08 15:48:19 -08009473 set_mask_bits(&mddev->sb_flags, 0,
9474 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
NeilBrown2230dfe2011-07-28 11:31:46 +10009475 md_wakeup_thread(rdev->mddev->thread);
Vishal Vermafc974ee2015-12-24 19:20:34 -07009476 return 1;
9477 } else
9478 return 0;
NeilBrown2230dfe2011-07-28 11:31:46 +10009479}
9480EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9481
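/*
 * Clear a range of previously recorded bad sectors, e.g. once the data
 * has been successfully rewritten.  Sector numbering matches
 * rdev_set_badblocks().
 */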
NeilBrownc6563a82012-05-21 09:27:00 +10009482int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9483 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009484{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009485 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10009486 if (is_new)
9487 s += rdev->new_data_offset;
9488 else
9489 s += rdev->data_offset;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009490 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9491 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009492 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009493 return rv;
NeilBrown2230dfe2011-07-28 11:31:46 +10009494}
9495EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9496
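/*
 * Reboot notifier: on shutdown/reboot, stop writes on every array we
 * can lock, switch persistent arrays to safemode, and delay briefly if
 * any arrays exist so their metadata can reach the disks first.
 */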
Adrian Bunk75c96f82005-05-05 16:16:09 -07009497static int md_notify_reboot(struct notifier_block *this,
9498 unsigned long code, void *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009499{
9500 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11009501 struct mddev *mddev;
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009502 int need_delay = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009503
NeilBrownc744a652012-03-19 12:46:37 +11009504 for_each_mddev(mddev, tmp) {
9505 if (mddev_trylock(mddev)) {
NeilBrown30b8aa92012-04-24 10:23:16 +10009506 if (mddev->pers)
9507 __md_stop_writes(mddev);
NeilBrown0f62fb22014-05-06 09:36:08 +10009508 if (mddev->persistent)
9509 mddev->safemode = 2;
NeilBrownc744a652012-03-19 12:46:37 +11009510 mddev_unlock(mddev);
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009511 }
NeilBrownc744a652012-03-19 12:46:37 +11009512 need_delay = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009513 }
NeilBrownc744a652012-03-19 12:46:37 +11009514 /*
9515	 * Certain more exotic SCSI devices are known to be
9516	 * volatile with respect to too-early system reboots.  While
9517	 * the right place to handle this issue is the individual
9518	 * driver, we do want the RAID driver to stay safe ...
9519 */
9520 if (need_delay)
9521 mdelay(1000*1);
9522
Linus Torvalds1da177e2005-04-16 15:20:36 -07009523 return NOTIFY_DONE;
9524}
9525
Adrian Bunk75c96f82005-05-05 16:16:09 -07009526static struct notifier_block md_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009527 .notifier_call = md_notify_reboot,
9528 .next = NULL,
9529 .priority = INT_MAX, /* before any real devices */
9530};
9531
9532static void md_geninit(void)
9533{
NeilBrown36a4e1f2011-10-07 14:23:17 +11009534 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009535
Alexey Dobriyan97a32532020-02-03 17:37:17 -08009536 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009537}
9538
Adrian Bunk75c96f82005-05-05 16:16:09 -07009539static int __init md_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009540{
Tejun Heoe804ac72010-10-15 15:36:08 +02009541 int ret = -ENOMEM;
9542
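	/*
	 * md_wq is allocated with WQ_MEM_RECLAIM so that work queued on it
	 * can still make progress under memory pressure; md_misc_wq and
	 * md_rdev_misc_wq carry miscellaneous and per-rdev deferred work.
	 */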
Tejun Heoada609e2011-01-25 14:35:54 +01009543 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
Tejun Heoe804ac72010-10-15 15:36:08 +02009544 if (!md_wq)
9545 goto err_wq;
9546
9547 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9548 if (!md_misc_wq)
9549 goto err_misc_wq;
9550
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009551 md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
Guoqing Jiangcf0b9b42020-10-08 05:19:09 +02009552 if (!md_rdev_misc_wq)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009553 goto err_rdev_misc_wq;
9554
Christoph Hellwig28144f92020-10-29 15:58:34 +01009555 ret = __register_blkdev(MD_MAJOR, "md", md_probe);
9556 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009557 goto err_md;
9558
Christoph Hellwig28144f92020-10-29 15:58:34 +01009559 ret = __register_blkdev(0, "mdp", md_probe);
9560 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009561 goto err_mdp;
9562 mdp_major = ret;
9563
Linus Torvalds1da177e2005-04-16 15:20:36 -07009564 register_reboot_notifier(&md_notifier);
Eric W. Biederman0b4d4142007-02-14 00:34:09 -08009565 raid_table_header = register_sysctl_table(raid_root_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009566
9567 md_geninit();
NeilBrownd710e132008-10-13 11:55:12 +11009568 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009569
Tejun Heoe804ac72010-10-15 15:36:08 +02009570err_mdp:
9571 unregister_blkdev(MD_MAJOR, "md");
9572err_md:
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009573 destroy_workqueue(md_rdev_misc_wq);
9574err_rdev_misc_wq:
Tejun Heoe804ac72010-10-15 15:36:08 +02009575 destroy_workqueue(md_misc_wq);
9576err_misc_wq:
9577 destroy_workqueue(md_wq);
9578err_wq:
9579 return ret;
9580}
Linus Torvalds1da177e2005-04-16 15:20:36 -07009581
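/*
 * Apply metadata changes announced by another cluster node: resize the
 * array if its size changed, pick up device role changes (activating
 * spares or failing devices), adjust the number of raid disks, follow a
 * remote reshape, and finally bring the event count up to date.
 */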
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009582static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009583{
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009584 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9585 struct md_rdev *rdev2;
9586 int role, ret;
9587 char b[BDEVNAME_SIZE];
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009588
Guoqing Jiang818da592017-03-01 16:42:40 +08009589 /*
9590 * If size is changed in another node then we need to
9591 * do resize as well.
9592 */
9593 if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9594 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9595 if (ret)
9596 pr_info("md-cluster: resize failed\n");
9597 else
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009598 md_bitmap_update_sb(mddev->bitmap);
Guoqing Jiang818da592017-03-01 16:42:40 +08009599 }
9600
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009601 /* Check for change of roles in the active devices */
9602 rdev_for_each(rdev2, mddev) {
9603 if (test_bit(Faulty, &rdev2->flags))
9604 continue;
9605
9606 /* Check if the roles changed */
9607 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009608
9609 if (test_bit(Candidate, &rdev2->flags)) {
9610 if (role == 0xfffe) {
9611 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9612 md_kick_rdev_from_array(rdev2);
9613 continue;
9614 }
9615 else
9616 clear_bit(Candidate, &rdev2->flags);
9617 }
9618
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009619 if (role != rdev2->raid_disk) {
Guoqing Jiangca1e98e2018-10-18 16:37:45 +08009620 /*
9621			 * device got activated on another node, unless a reshape is in progress.
9622 */
9623 if (rdev2->raid_disk == -1 && role != 0xffff &&
9624 !(le32_to_cpu(sb->feature_map) &
9625 MD_FEATURE_RESHAPE_ACTIVE)) {
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009626 rdev2->saved_raid_disk = role;
9627 ret = remove_and_add_spares(mddev, rdev2);
9628 pr_info("Activated spare: %s\n",
NeilBrown9d487392016-11-02 14:16:49 +11009629 bdevname(rdev2->bdev,b));
Guoqing Jianga5781832016-05-02 11:33:14 -04009630				/* wake up mddev->thread here, so the array can
9631				 * perform a resync with the newly activated disk */
9632 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9633 md_wakeup_thread(mddev->thread);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009634 }
9635 /* device faulty
9636 * We just want to do the minimum to mark the disk
9637 * as faulty. The recovery is performed by the
9638			 * node that initiated the error.
9639 */
9640 if ((role == 0xfffe) || (role == 0xfffd)) {
9641 md_error(mddev, rdev2);
9642 clear_bit(Blocked, &rdev2->flags);
9643 }
9644 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009645 }
9646
Zhao Heminga8da01f2020-11-19 19:41:33 +08009647 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9648 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9649 if (ret)
9650 pr_warn("md: updating array disks failed. %d\n", ret);
9651 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009652
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009653 /*
9654 * Since mddev->delta_disks has already updated in update_raid_disks,
9655 * so it is time to check reshape.
9656 */
9657 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9658 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9659 /*
9660 * reshape is happening in the remote node, we need to
9661 * update reshape_position and call start_reshape.
9662 */
Christoph Hellwiged4d0a4e2019-04-04 18:56:10 +02009663 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009664 if (mddev->pers->update_reshape_pos)
9665 mddev->pers->update_reshape_pos(mddev);
9666 if (mddev->pers->start_reshape)
9667 mddev->pers->start_reshape(mddev);
9668 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9669 mddev->reshape_position != MaxSector &&
9670 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9671 /* reshape is just done in another node. */
9672 mddev->reshape_position = MaxSector;
9673 if (mddev->pers->update_reshape_pos)
9674 mddev->pers->update_reshape_pos(mddev);
9675 }
9676
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009677 /* Finally set the event to be up to date */
9678 mddev->events = le64_to_cpu(sb->events);
9679}
9680
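/*
 * Re-read the superblock of one device into a freshly allocated page,
 * restoring the previous page if that fails, then refresh
 * recovery_offset and In_sync state from the new metadata.
 */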
9681static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9682{
9683 int err;
9684 struct page *swapout = rdev->sb_page;
9685 struct mdp_superblock_1 *sb;
9686
9687	/* Store the rdev's sb page in the 'swapout' temporary
9688	 * variable so it can be restored if an error occurs below
9689 */
9690 rdev->sb_page = NULL;
NeilBrown7f0f0d82016-11-02 14:16:49 +11009691 err = alloc_disk_sb(rdev);
9692 if (err == 0) {
9693 ClearPageUptodate(rdev->sb_page);
9694 rdev->sb_loaded = 0;
9695 err = super_types[mddev->major_version].
9696 load_super(rdev, NULL, mddev->minor_version);
9697 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009698 if (err < 0) {
9699 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9700 __func__, __LINE__, rdev->desc_nr, err);
NeilBrown7f0f0d82016-11-02 14:16:49 +11009701 if (rdev->sb_page)
9702 put_page(rdev->sb_page);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009703 rdev->sb_page = swapout;
9704 rdev->sb_loaded = 1;
9705 return err;
9706 }
9707
9708 sb = page_address(rdev->sb_page);
9709	/* Restore recovery_offset only when the superblock marks it as valid
9710	 * (MD_FEATURE_RECOVERY_OFFSET is set)
9711 */
9712
9713 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9714 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9715
9716	/* The other node finished recovery; call spare_active to mark
9717	 * the device In_sync and update mddev->degraded
9718 */
9719 if (rdev->recovery_offset == MaxSector &&
9720 !test_bit(In_sync, &rdev->flags) &&
9721 mddev->pers->spare_active(mddev))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009722 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009723
9724 put_page(swapout);
9725 return 0;
9726}
9727
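/*
 * Reload the superblock of the device with descriptor number 'nr' and
 * apply any changes found there, then re-read every working device so
 * its recovery_offset is current.  Used when another node has updated
 * the metadata (md-cluster).
 */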
9728void md_reload_sb(struct mddev *mddev, int nr)
9729{
9730 struct md_rdev *rdev;
9731 int err;
9732
9733 /* Find the rdev */
9734 rdev_for_each_rcu(rdev, mddev) {
9735 if (rdev->desc_nr == nr)
9736 break;
9737 }
9738
9739 if (!rdev || rdev->desc_nr != nr) {
9740 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9741 return;
9742 }
9743
9744 err = read_rdev(mddev, rdev);
9745 if (err < 0)
9746 return;
9747
9748 check_sb_changes(mddev, rdev);
9749
9750 /* Read all rdev's to update recovery_offset */
Guoqing Jiang0ea99242018-04-09 17:01:21 +08009751 rdev_for_each_rcu(rdev, mddev) {
9752 if (!test_bit(Faulty, &rdev->flags))
9753 read_rdev(mddev, rdev);
9754 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009755}
9756EXPORT_SYMBOL(md_reload_sb);
9757
Linus Torvalds1da177e2005-04-16 15:20:36 -07009758#ifndef MODULE
9759
9760/*
9761 * Searches all registered partitions for autorun RAID arrays
9762 * at boot time.
9763 */
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009764
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009765static DEFINE_MUTEX(detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009766static LIST_HEAD(all_detected_devices);
9767struct detected_devices_node {
9768 struct list_head list;
9769 dev_t dev;
9770};
Linus Torvalds1da177e2005-04-16 15:20:36 -07009771
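/*
 * Remember a partition (by dev_t) so md_autostart_arrays() can examine
 * it for a RAID superblock later during boot.
 */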
9772void md_autodetect_dev(dev_t dev)
9773{
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009774 struct detected_devices_node *node_detected_dev;
9775
9776 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9777 if (node_detected_dev) {
9778 node_detected_dev->dev = dev;
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009779 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009780 list_add_tail(&node_detected_dev->list, &all_detected_devices);
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009781 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009782 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009783}
9784
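/*
 * Walk the list built by md_autodetect_dev(), import each device
 * (skipping any that fail to import or are marked Faulty), queue the
 * rest on pending_raid_disks and let autorun_devices() assemble and
 * start the arrays.
 */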
Christoph Hellwigd82fa812020-06-06 15:00:24 +02009785void md_autostart_arrays(int part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009786{
NeilBrown3cb03002011-10-11 16:45:26 +11009787 struct md_rdev *rdev;
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009788 struct detected_devices_node *node_detected_dev;
9789 dev_t dev;
9790 int i_scanned, i_passed;
9791
9792 i_scanned = 0;
9793 i_passed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009794
NeilBrown9d487392016-11-02 14:16:49 +11009795 pr_info("md: Autodetecting RAID arrays.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009796
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009797 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009798 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9799 i_scanned++;
9800 node_detected_dev = list_entry(all_detected_devices.next,
9801 struct detected_devices_node, list);
9802 list_del(&node_detected_dev->list);
9803 dev = node_detected_dev->dev;
9804 kfree(node_detected_dev);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009805 mutex_unlock(&detected_devices_mutex);
NeilBrowndf968c42007-07-17 04:06:11 -07009806 rdev = md_import_device(dev,0, 90);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009807 mutex_lock(&detected_devices_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009808 if (IS_ERR(rdev))
9809 continue;
9810
NeilBrown403df472014-09-30 15:52:29 +10009811 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009812 continue;
NeilBrown403df472014-09-30 15:52:29 +10009813
NeilBrownd0fae182008-03-04 14:29:31 -08009814 set_bit(AutoDetected, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009815 list_add(&rdev->same_set, &pending_raid_disks);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009816 i_passed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009817 }
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009818 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009819
NeilBrown9d487392016-11-02 14:16:49 +11009820 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009821
9822 autorun_devices(part);
9823}
9824
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08009825#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009826
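/*
 * Module exit: unregister the block majors, notifiers and sysctls, wake
 * anyone polling /proc/mdstat, export the remaining arrays and tear
 * down the workqueues.
 */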
9827static __exit void md_exit(void)
9828{
NeilBrownfd01b882011-10-11 16:47:53 +11009829 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009830 struct list_head *tmp;
NeilBrowne2f23b62014-04-09 14:33:51 +10009831 int delay = 1;
Greg Kroah-Hartman8ab5e4c2005-06-20 21:15:16 -07009832
Christoph Hellwig3dbd8c22009-03-31 14:27:02 +11009833 unregister_blkdev(MD_MAJOR,"md");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009834 unregister_blkdev(mdp_major, "mdp");
9835 unregister_reboot_notifier(&md_notifier);
9836 unregister_sysctl_table(raid_table_header);
NeilBrowne2f23b62014-04-09 14:33:51 +10009837
9838 /* We cannot unload the modules while some process is
9839 * waiting for us in select() or poll() - wake them up
9840 */
9841 md_unloading = 1;
9842 while (waitqueue_active(&md_event_waiters)) {
9843 /* not safe to leave yet */
9844 wake_up(&md_event_waiters);
9845 msleep(delay);
9846 delay += delay;
9847 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009848 remove_proc_entry("mdstat", NULL);
NeilBrowne2f23b62014-04-09 14:33:51 +10009849
NeilBrown29ac4aa2008-02-06 01:39:58 -08009850 for_each_mddev(mddev, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009851 export_array(mddev);
NeilBrown93568632017-02-06 13:41:39 +11009852 mddev->ctime = 0;
NeilBrownd3374822009-01-09 08:31:10 +11009853 mddev->hold_active = 0;
NeilBrown93568632017-02-06 13:41:39 +11009854 /*
9855 * for_each_mddev() will call mddev_put() at the end of each
9856 * iteration. As the mddev is now fully clear, this will
9857 * schedule the mddev for destruction by a workqueue, and the
9858 * destroy_workqueue() below will wait for that to complete.
9859 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009860 }
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009861 destroy_workqueue(md_rdev_misc_wq);
Tejun Heoe804ac72010-10-15 15:36:08 +02009862 destroy_workqueue(md_misc_wq);
9863 destroy_workqueue(md_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009864}
9865
Dan Williams685784a2007-07-09 11:56:42 -07009866subsys_initcall(md_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009867module_exit(md_exit)
9868
Kees Cooke4dca7b2017-10-17 19:04:42 -07009869static int get_ro(char *buffer, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009870{
Xiongfeng Wang3f999802020-05-11 16:23:25 +08009871 return sprintf(buffer, "%d\n", start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009872}
Kees Cooke4dca7b2017-10-17 19:04:42 -07009873static int set_ro(const char *val, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009874{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03009875 return kstrtouint(val, 10, (unsigned int *)&start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009876}
9877
NeilBrown80ca3a42006-07-10 04:44:18 -07009878module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9879module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
NeilBrownefeb53c2009-01-09 08:31:10 +11009880module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
NeilBrown78b63502017-04-12 16:26:13 +10009881module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
NeilBrownf91de922005-11-08 21:39:36 -08009882
Linus Torvalds1da177e2005-04-16 15:20:36 -07009883MODULE_LICENSE("GPL");
NeilBrown0efb9e62009-12-14 12:49:58 +11009884MODULE_DESCRIPTION("MD RAID framework");
NeilBrownaa1595e2005-08-04 12:53:32 -07009885MODULE_ALIAS("md");
NeilBrown72008652005-08-26 18:34:15 -07009886MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);