// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/
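
/*
 * Illustrative examples of the logging policy above (a sketch, not part of
 * the driver; the messages shown are hypothetical):
 *
 *	pr_warn("md: %s: rdev has incompatible metadata, ignoring it\n",
 *		mdname(mddev));
 *	pr_info("md: %s: resync started\n", mdname(mddev));
 */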

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */
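
/*
 * Example (illustrative only): raising the minimum guaranteed resync speed,
 * either globally or for a single array, from user space.  Values are in
 * KiB/sec per device:
 *
 *	echo 5000 > /proc/sys/dev/raid/speed_limit_min
 *	echo 5000 > /sys/block/md0/md/sync_speed_min
 */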

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not already exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * rdev needs to enable serialization if it meets both conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resource for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			      bool is_suspend)
{
	int ret = 0;

	if (rdev && !rdev_need_serial(rdev) &&
	    !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (!is_suspend)
		mddev_suspend(mddev);

	if (!rdev)
		ret = rdevs_init_serial(mddev);
	else
		ret = rdev_init_serial(rdev);
	if (ret)
		goto abort;

	if (mddev->serial_info_pool == NULL) {
		/*
		 * already in memalloc noio context by
		 * mddev_suspend()
		 */
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		if (!mddev->serial_info_pool) {
			rdevs_uninit_serial(mddev);
			pr_err("can't alloc memory pool for serialization\n");
		}
	}

abort:
	if (!is_suspend)
		mddev_resume(mddev);
}

/*
 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. when bitmap is destroyed while policy is not enabled.
 * 3. for disable policy, the pool is destroyed only when no rdev needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			       bool is_suspend)
{
	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0; /* used to track if other rdevs need the pool */

		if (!is_suspend)
			mddev_suspend(mddev);
		rdev_for_each(temp, mddev) {
			if (!rdev) {
				if (!mddev->serialize_policy ||
				    !rdev_need_serial(temp))
					rdev_uninit_serial(temp);
				else
					num++;
			} else if (temp != rdev &&
				   test_bit(CollisionCheck, &temp->flags))
				num++;
		}

		if (rdev)
			rdev_uninit_serial(rdev);

		if (num)
			pr_info("The mempool could be used by other devices\n");
		else {
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
		}
		if (!is_suspend)
			mddev_resume(mddev);
	}
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it.  This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
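
/*
 * Illustrative use of the iterator above (a sketch, not driver code): the
 * macro takes and drops all_mddevs_lock internally and holds a reference on
 * the current mddev, so code that breaks out of the loop must drop that
 * reference itself:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		if (matches(mddev)) {		// hypothetical predicate
 *			do_something(mddev);	// hypothetical work
 *			mddev_put(mddev);	// drop the ref the loop holds
 *			break;
 *		}
 *	}
 */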

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

struct md_io {
	struct mddev *mddev;
	bio_end_io_t *orig_bi_end_io;
	void *orig_bi_private;
	struct block_device *orig_bi_bdev;
	unsigned long start_time;
};

static void md_end_io(struct bio *bio)
{
	struct md_io *md_io = bio->bi_private;
	struct mddev *mddev = md_io->mddev;

	bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);

	bio->bi_end_io = md_io->orig_bi_end_io;
	bio->bi_private = md_io->orig_bi_private;

	mempool_free(md_io, &mddev->md_io_pool);

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}

static blk_qc_t md_submit_bio(struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(&bio);

	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (bio->bi_end_io != md_end_io) {
		struct md_io *md_io;

		md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
		md_io->mddev = mddev;
		md_io->orig_bi_end_io = bio->bi_end_io;
		md_io->orig_bi_private = bio->bi_private;
		md_io->orig_bi_bdev = bio->bi_bdev;

		bio->bi_end_io = md_end_io;
		bio->bi_private = md_io;

		md_io->start_time = bio_start_io_acct(bio);
	}

	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	/* leave the memalloc scope entered in mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
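
/*
 * Typical (illustrative) usage of the pair above: suspend/resume calls nest,
 * so a reconfiguration path that already holds reconfig_mutex can bracket
 * its changes with
 *
 *	mddev_suspend(mddev);
 *	...			// change the array configuration
 *	mddev_resume(mddev);
 *
 * and I/O is only restarted once the outermost mddev_resume() runs.
 */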

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to avoid a
	 * deadlock, because other bios passed md_handle_request suspend check
	 * could wait for this and below md_handle_request could wait for those
	 * bios because of suspend check
	 */
	spin_lock_irq(&mddev->lock);
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	spin_unlock_irq(&mddev->lock);
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}
David Jeffery775d7832019-09-16 13:15:14 -0400639/*
640 * Manages consolidation of flushes and submitting any flushes needed for
641 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
642 * being finished in another context. Returns false if the flushing is
643 * complete but still needs the I/O portion of the bio to be processed.
644 */
645bool md_flush_request(struct mddev *mddev, struct bio *bio)
NeilBrown4bc034d2019-03-29 10:46:16 -0700646{
Pankaj Gupta81ba3c22020-11-11 06:16:56 +0100647 ktime_t req_start = ktime_get_boottime();
NeilBrown4bc034d2019-03-29 10:46:16 -0700648 spin_lock_irq(&mddev->lock);
Pankaj Gupta204d1a62020-11-11 06:16:57 +0100649 /* flush requests wait until ongoing flush completes,
650 * hence coalescing all the pending requests.
651 */
NeilBrown4bc034d2019-03-29 10:46:16 -0700652 wait_event_lock_irq(mddev->sb_wait,
NeilBrown2bc13b82019-03-29 10:46:17 -0700653 !mddev->flush_bio ||
Pankaj Guptaa23f2aa2020-11-11 06:16:58 +0100654 ktime_before(req_start, mddev->prev_flush_start),
NeilBrown4bc034d2019-03-29 10:46:16 -0700655 mddev->lock);
Pankaj Gupta204d1a62020-11-11 06:16:57 +0100656 /* new request after previous flush is completed */
Pankaj Guptaa23f2aa2020-11-11 06:16:58 +0100657 if (ktime_after(req_start, mddev->prev_flush_start)) {
NeilBrown2bc13b82019-03-29 10:46:17 -0700658 WARN_ON(mddev->flush_bio);
659 mddev->flush_bio = bio;
660 bio = NULL;
661 }
NeilBrown4bc034d2019-03-29 10:46:16 -0700662 spin_unlock_irq(&mddev->lock);
663
NeilBrown2bc13b82019-03-29 10:46:17 -0700664 if (!bio) {
665 INIT_WORK(&mddev->flush_work, submit_flushes);
666 queue_work(md_wq, &mddev->flush_work);
667 } else {
668 /* flush was performed for some other bio while we waited. */
669 if (bio->bi_iter.bi_size == 0)
670 /* an empty barrier - all done */
671 bio_endio(bio);
672 else {
673 bio->bi_opf &= ~REQ_PREFLUSH;
David Jeffery775d7832019-09-16 13:15:14 -0400674 return false;
NeilBrown2bc13b82019-03-29 10:46:17 -0700675 }
676 }
David Jeffery775d7832019-09-16 13:15:14 -0400677 return true;
NeilBrown4bc034d2019-03-29 10:46:16 -0700678}
Tejun Heoe9c74692010-09-03 11:56:18 +0200679EXPORT_SYMBOL(md_flush_request);
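
/*
 * Sketch of how a personality's make_request method is expected to use
 * md_flush_request() (illustrative; raid_make_request is a placeholder name):
 *
 *	static bool raid_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *		    && md_flush_request(mddev, bio))
 *			return true;	// flush handled or queued for us
 *		...			// otherwise handle the data portion
 *	}
 */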

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	kobject_init(&mddev->kobj, &md_ktype);
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: %s gets error=%d\n", __func__,
		       blk_status_to_errno(bio->bi_status));
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio bio;
	struct bio_vec bvec;

	bio_init(&bio, &bvec, 1);

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(&bio, rdev->meta_bdev);
	else
		bio_set_dev(&bio, rdev->bdev);
	bio.bi_opf = op | op_flags;
	if (metadata_op)
		bio.bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio.bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(&bio, page, size, 0);

	submit_bio_wait(&bio);

	return !bio.bi_status;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
1132
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133/*
1134 * Handle superblock details.
1135 * We want to be able to handle multiple superblock formats
1136 * so we have a common interface to them all, and an array of
1137 * different handlers.
1138 * We rely on user-space to write the initial superblock, and support
1139 * reading and updating of superblocks.
1140 * Interface methods are:
NeilBrown3cb03002011-10-11 16:45:26 +11001141 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 * loads and validates a superblock on dev.
1143 * if refdev != NULL, compare superblocks on both devices
1144 * Return:
1145 * 0 - dev has a superblock that is compatible with refdev
1146 * 1 - dev has a superblock that is compatible and newer than refdev
1147 * so dev should be used as the refdev in future
1148 * -EINVAL superblock incompatible or invalid
1149 * -othererror e.g. -EIO
1150 *
NeilBrownfd01b882011-10-11 16:47:53 +11001151 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152 * Verify that dev is acceptable into mddev.
1153 * The first time, mddev->raid_disks will be 0, and data from
1154 * dev should be merged in. Subsequent calls check that dev
1155 * is new enough. Return 0 or -EINVAL
1156 *
NeilBrownfd01b882011-10-11 16:47:53 +11001157 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 * Update the superblock for rdev with data in mddev
1159 * This does not write to disc.
1160 *
1161 */
1162
1163struct super_type {
Chris Webb0cd17fe2008-06-28 08:31:46 +10001164 char *name;
1165 struct module *owner;
NeilBrownc6563a82012-05-21 09:27:00 +10001166 int (*load_super)(struct md_rdev *rdev,
1167 struct md_rdev *refdev,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001168 int minor_version);
NeilBrownc6563a82012-05-21 09:27:00 +10001169 int (*validate_super)(struct mddev *mddev,
1170 struct md_rdev *rdev);
1171 void (*sync_super)(struct mddev *mddev,
1172 struct md_rdev *rdev);
NeilBrown3cb03002011-10-11 16:45:26 +11001173 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
Andre Noll15f4a5f2008-07-21 14:42:12 +10001174 sector_t num_sectors);
NeilBrownc6563a82012-05-21 09:27:00 +10001175 int (*allow_new_offset)(struct md_rdev *rdev,
1176 unsigned long long new_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177};
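/*
 * Callers dispatch through the super_types[] table defined near the end
 * of this section, roughly like this (illustrative sketch only):
 *
 *	err = super_types[mddev->major_version].validate_super(mddev, rdev);
 */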
1178
1179/*
Andre Noll0894cc32009-06-18 08:49:23 +10001180 * Check that the given mddev has no bitmap.
1181 *
1182 * This function is called from the run method of all personalities that do not
1183 * support bitmaps. It prints an error message and returns non-zero if mddev
1184 * has a bitmap. Otherwise, it returns 0.
1185 *
1186 */
NeilBrownfd01b882011-10-11 16:47:53 +11001187int md_check_no_bitmap(struct mddev *mddev)
Andre Noll0894cc32009-06-18 08:49:23 +10001188{
NeilBrownc3d97142009-12-14 12:49:52 +11001189 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
Andre Noll0894cc32009-06-18 08:49:23 +10001190 return 0;
NeilBrown9d487392016-11-02 14:16:49 +11001191 pr_warn("%s: bitmaps are not supported for %s\n",
Andre Noll0894cc32009-06-18 08:49:23 +10001192 mdname(mddev), mddev->pers->name);
1193 return 1;
1194}
1195EXPORT_SYMBOL(md_check_no_bitmap);
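/*
 * A personality that does not support bitmaps would typically call this
 * from its ->run() method, e.g. (illustrative sketch only):
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;
 */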
1196
1197/*
NeilBrownf72ffdd2014-09-30 14:23:59 +10001198 * load_super for 0.90.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 */
NeilBrown3cb03002011-10-11 16:45:26 +11001200static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201{
1202 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1203 mdp_super_t *sb;
1204 int ret;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001205 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
1207 /*
Andre Noll0f420352008-07-11 22:02:23 +10001208 * Calculate the position of the superblock (in 512-byte sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 * it's at the end of the disk.
1210 *
 1211 * It also happens to be a multiple of 4K.
1212 */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001213 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
NeilBrown0002b272005-09-09 16:23:53 -07001215 ret = read_disk_sb(rdev, MD_SB_BYTES);
NeilBrown9d487392016-11-02 14:16:49 +11001216 if (ret)
1217 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
1219 ret = -EINVAL;
1220
1221 bdevname(rdev->bdev, b);
Namhyung Kim65a06f062011-07-27 11:00:36 +10001222 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223
1224 if (sb->md_magic != MD_SB_MAGIC) {
NeilBrown9d487392016-11-02 14:16:49 +11001225 pr_warn("md: invalid raid superblock magic on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 goto abort;
1227 }
1228
1229 if (sb->major_version != 0 ||
NeilBrownf6705572006-03-27 01:18:11 -08001230 sb->minor_version < 90 ||
1231 sb->minor_version > 91) {
NeilBrown9d487392016-11-02 14:16:49 +11001232 pr_warn("Bad version number %d.%d on %s\n",
1233 sb->major_version, sb->minor_version, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 goto abort;
1235 }
1236
1237 if (sb->raid_disks <= 0)
1238 goto abort;
1239
NeilBrown4d167f02007-05-09 02:35:37 -07001240 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
NeilBrown9d487392016-11-02 14:16:49 +11001241 pr_warn("md: invalid superblock checksum on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 goto abort;
1243 }
1244
1245 rdev->preferred_minor = sb->md_minor;
1246 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10001247 rdev->new_data_offset = 0;
NeilBrown0002b272005-09-09 16:23:53 -07001248 rdev->sb_size = MD_SB_BYTES;
NeilBrown9f2f3832011-07-28 11:31:47 +10001249 rdev->badblocks.shift = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250
1251 if (sb->level == LEVEL_MULTIPATH)
1252 rdev->desc_nr = -1;
1253 else
1254 rdev->desc_nr = sb->this_disk.number;
1255
Yufen Yu228fc7d2019-10-30 18:47:02 +08001256 /* not spare disk, or LEVEL_MULTIPATH */
1257 if (sb->level == LEVEL_MULTIPATH ||
1258 (rdev->desc_nr >= 0 &&
Yufen Yu3b7436c2019-12-10 15:01:29 +08001259 rdev->desc_nr < MD_SB_DISKS &&
Yufen Yu228fc7d2019-10-30 18:47:02 +08001260 sb->disks[rdev->desc_nr].state &
1261 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1262 spare_disk = false;
1263
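	/*
	 * With no reference superblock yet, prefer a non-spare device as the
	 * refdev for later comparisons: returning 1 tells the caller to use
	 * this device as the new refdev.
	 */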
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001264 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001265 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001266 ret = 1;
1267 else
1268 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001269 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001271 mdp_super_t *refsb = page_address(refdev->sb_page);
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001272 if (!md_uuid_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001273 pr_warn("md: %s has different UUID to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 b, bdevname(refdev->bdev,b2));
1275 goto abort;
1276 }
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001277 if (!md_sb_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001278 pr_warn("md: %s has same UUID but different superblock to %s\n",
1279 b, bdevname(refdev->bdev, b2));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 goto abort;
1281 }
1282 ev1 = md_event(sb);
1283 ev2 = md_event(refsb);
Yufen Yu6a5cb532019-10-16 16:00:03 +08001284
Yufen Yu228fc7d2019-10-30 18:47:02 +08001285 if (!spare_disk && ev1 > ev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 ret = 1;
NeilBrownf72ffdd2014-09-30 14:23:59 +10001287 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 ret = 0;
1289 }
NeilBrown8190e752009-06-18 08:48:58 +10001290 rdev->sectors = rdev->sb_start;
NeilBrown667a5312012-08-16 16:46:12 +10001291 /* Limit to 4TB as metadata cannot record more than that.
1292 * (not needed for Linear and RAID0 as metadata doesn't
1293 * record this size)
1294 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001295 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001296 rdev->sectors = (sector_t)(2ULL << 32) - 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297
NeilBrown27a7b262011-09-10 17:21:28 +10001298 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
NeilBrown2bf071b2006-01-06 00:20:55 -08001299 /* "this cannot possibly happen" ... */
1300 ret = -EINVAL;
1301
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 abort:
1303 return ret;
1304}
1305
1306/*
1307 * validate_super for 0.90.0
1308 */
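/*
 * Three cases below: mddev->raid_disks == 0 means this is the first device
 * seen during assembly, so the array-wide fields are copied from it;
 * mddev->pers == NULL means assembly is still in progress and only the
 * event count is checked; otherwise the device is being hot-added to a
 * running array.
 */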
NeilBrownfd01b882011-10-11 16:47:53 +11001309static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310{
1311 mdp_disk_t *desc;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001312 mdp_super_t *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001313 __u64 ev1 = md_event(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
NeilBrown41158c72005-06-21 17:17:25 -07001315 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001316 clear_bit(Faulty, &rdev->flags);
1317 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001318 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001319 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001320
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 if (mddev->raid_disks == 0) {
1322 mddev->major_version = 0;
1323 mddev->minor_version = sb->minor_version;
1324 mddev->patch_version = sb->patch_version;
NeilBrowne6910632008-02-06 01:39:51 -08001325 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001326 mddev->chunk_sectors = sb->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 mddev->ctime = sb->ctime;
1328 mddev->utime = sb->utime;
1329 mddev->level = sb->level;
NeilBrownd9d166c2006-01-06 00:20:51 -08001330 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 mddev->layout = sb->layout;
1332 mddev->raid_disks = sb->raid_disks;
NeilBrown27a7b262011-09-10 17:21:28 +10001333 mddev->dev_sectors = ((sector_t)sb->size) * 2;
NeilBrown07d84d102006-06-26 00:27:56 -07001334 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001335 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001336 mddev->bitmap_info.space = 0;
 1337 /* bitmap can use 60K after the 4K superblock */
NeilBrownc3d97142009-12-14 12:49:52 +11001338 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001339 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrown2c810cd2012-05-21 09:27:00 +10001340 mddev->reshape_backwards = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341
NeilBrownf6705572006-03-27 01:18:11 -08001342 if (mddev->minor_version >= 91) {
1343 mddev->reshape_position = sb->reshape_position;
1344 mddev->delta_disks = sb->delta_disks;
1345 mddev->new_level = sb->new_level;
1346 mddev->new_layout = sb->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001347 mddev->new_chunk_sectors = sb->new_chunk >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001348 if (mddev->delta_disks < 0)
1349 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001350 } else {
1351 mddev->reshape_position = MaxSector;
1352 mddev->delta_disks = 0;
1353 mddev->new_level = mddev->level;
1354 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001355 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001356 }
NeilBrown33f2c352019-09-09 16:52:29 +10001357 if (mddev->level == 0)
1358 mddev->layout = -1;
NeilBrownf6705572006-03-27 01:18:11 -08001359
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 if (sb->state & (1<<MD_SB_CLEAN))
1361 mddev->recovery_cp = MaxSector;
1362 else {
NeilBrownf72ffdd2014-09-30 14:23:59 +10001363 if (sb->events_hi == sb->cp_events_hi &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 sb->events_lo == sb->cp_events_lo) {
1365 mddev->recovery_cp = sb->recovery_cp;
1366 } else
1367 mddev->recovery_cp = 0;
1368 }
1369
1370 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1371 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1372 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1373 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1374
1375 mddev->max_disks = MD_SB_DISKS;
NeilBrowna654b9d82005-06-21 17:17:27 -07001376
1377 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001378 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001379 mddev->bitmap_info.offset =
1380 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10001381 mddev->bitmap_info.space =
Dave Jonesc9ad0202013-08-19 22:26:32 -04001382 mddev->bitmap_info.default_space;
NeilBrown6409bb02012-05-22 13:55:07 +10001383 }
NeilBrowna654b9d82005-06-21 17:17:27 -07001384
NeilBrown41158c72005-06-21 17:17:25 -07001385 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001386 /* Insist on good event counter while assembling, except
1387 * for spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001389 if (sb->disks[rdev->desc_nr].state & (
1390 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
NeilBrownf72ffdd2014-09-30 14:23:59 +10001391 if (ev1 < mddev->events)
NeilBrownbe6800a2010-05-18 10:17:09 +10001392 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001393 } else if (mddev->bitmap) {
1394 /* if adding to array with a bitmap, then we can accept an
1395 * older device ... but not too old.
1396 */
NeilBrown41158c72005-06-21 17:17:25 -07001397 if (ev1 < mddev->bitmap->events_cleared)
1398 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001399 if (ev1 < mddev->events)
1400 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001401 } else {
1402 if (ev1 < mddev->events)
1403 /* just a hot-add of a new device, leave raid_disk at -1 */
1404 return 0;
1405 }
NeilBrown41158c72005-06-21 17:17:25 -07001406
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 if (mddev->level != LEVEL_MULTIPATH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 desc = sb->disks + rdev->desc_nr;
1409
1410 if (desc->state & (1<<MD_DISK_FAULTY))
NeilBrownb2d444d2005-11-08 21:39:31 -08001411 set_bit(Faulty, &rdev->flags);
NeilBrown7c7546c2006-06-26 00:27:41 -07001412 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1413 desc->raid_disk < mddev->raid_disks */) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001414 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 rdev->raid_disk = desc->raid_disk;
NeilBrownf4667222013-12-09 12:04:56 +11001416 rdev->saved_raid_disk = desc->raid_disk;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001417 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1418 /* active but not in sync implies recovery up to
1419 * reshape position. We don't know exactly where
1420 * that is, so set to zero for now */
1421 if (mddev->minor_version >= 91) {
1422 rdev->recovery_offset = 0;
1423 rdev->raid_disk = desc->raid_disk;
1424 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001426 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1427 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001428 if (desc->state & (1<<MD_DISK_FAILFAST))
1429 set_bit(FailFast, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001430 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001431 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 return 0;
1433}
1434
1435/*
1436 * sync_super for 0.90.0
1437 */
NeilBrownfd01b882011-10-11 16:47:53 +11001438static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439{
1440 mdp_super_t *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001441 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 int next_spare = mddev->raid_disks;
NeilBrown19133a42005-11-08 21:39:35 -08001443
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 /* make rdev->sb match mddev data..
1445 *
1446 * 1/ zero out disks
1447 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1448 * 3/ any empty disks < next_spare become removed
1449 *
1450 * disks[0] gets initialised to REMOVED because
1451 * we cannot be sure from other fields if it has
1452 * been initialised or not.
1453 */
1454 int i;
1455 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1456
NeilBrown61181562005-09-09 16:24:02 -07001457 rdev->sb_size = MD_SB_BYTES;
1458
Namhyung Kim65a06f062011-07-27 11:00:36 +10001459 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460
1461 memset(sb, 0, sizeof(*sb));
1462
1463 sb->md_magic = MD_SB_MAGIC;
1464 sb->major_version = mddev->major_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 sb->patch_version = mddev->patch_version;
1466 sb->gvalid_words = 0; /* ignored */
1467 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1468 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1469 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1470 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1471
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001472 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 sb->level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11001474 sb->size = mddev->dev_sectors / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 sb->raid_disks = mddev->raid_disks;
1476 sb->md_minor = mddev->md_minor;
NeilBrowne6910632008-02-06 01:39:51 -08001477 sb->not_persistent = 0;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001478 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 sb->state = 0;
1480 sb->events_hi = (mddev->events>>32);
1481 sb->events_lo = (u32)mddev->events;
1482
NeilBrownf6705572006-03-27 01:18:11 -08001483 if (mddev->reshape_position == MaxSector)
1484 sb->minor_version = 90;
1485 else {
1486 sb->minor_version = 91;
1487 sb->reshape_position = mddev->reshape_position;
1488 sb->new_level = mddev->new_level;
1489 sb->delta_disks = mddev->delta_disks;
1490 sb->new_layout = mddev->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001491 sb->new_chunk = mddev->new_chunk_sectors << 9;
NeilBrownf6705572006-03-27 01:18:11 -08001492 }
1493 mddev->minor_version = sb->minor_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 if (mddev->in_sync)
1495 {
1496 sb->recovery_cp = mddev->recovery_cp;
1497 sb->cp_events_hi = (mddev->events>>32);
1498 sb->cp_events_lo = (u32)mddev->events;
1499 if (mddev->recovery_cp == MaxSector)
1500 sb->state = (1<< MD_SB_CLEAN);
1501 } else
1502 sb->recovery_cp = 0;
1503
1504 sb->layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10001505 sb->chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506
NeilBrownc3d97142009-12-14 12:49:52 +11001507 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
NeilBrowna654b9d82005-06-21 17:17:27 -07001508 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1509
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 sb->disks[0].state = (1<<MD_DISK_REMOVED);
NeilBrowndafb20f2012-03-19 12:46:39 +11001511 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 mdp_disk_t *d;
NeilBrown86e6ffd2005-11-08 21:39:24 -08001513 int desc_nr;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001514 int is_active = test_bit(In_sync, &rdev2->flags);
1515
1516 if (rdev2->raid_disk >= 0 &&
1517 sb->minor_version >= 91)
1518 /* we have nowhere to store the recovery_offset,
1519 * but if it is not below the reshape_position,
1520 * we can piggy-back on that.
1521 */
1522 is_active = 1;
1523 if (rdev2->raid_disk < 0 ||
1524 test_bit(Faulty, &rdev2->flags))
1525 is_active = 0;
1526 if (is_active)
NeilBrown86e6ffd2005-11-08 21:39:24 -08001527 desc_nr = rdev2->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 else
NeilBrown86e6ffd2005-11-08 21:39:24 -08001529 desc_nr = next_spare++;
NeilBrown19133a42005-11-08 21:39:35 -08001530 rdev2->desc_nr = desc_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 d = &sb->disks[rdev2->desc_nr];
1532 nr_disks++;
1533 d->number = rdev2->desc_nr;
1534 d->major = MAJOR(rdev2->bdev->bd_dev);
1535 d->minor = MINOR(rdev2->bdev->bd_dev);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001536 if (is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 d->raid_disk = rdev2->raid_disk;
1538 else
1539 d->raid_disk = rdev2->desc_nr; /* compatibility */
NeilBrown1be78922006-03-27 01:18:03 -08001540 if (test_bit(Faulty, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 d->state = (1<<MD_DISK_FAULTY);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001542 else if (is_active) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 d->state = (1<<MD_DISK_ACTIVE);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001544 if (test_bit(In_sync, &rdev2->flags))
1545 d->state |= (1<<MD_DISK_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 active++;
1547 working++;
1548 } else {
1549 d->state = 0;
1550 spare++;
1551 working++;
1552 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001553 if (test_bit(WriteMostly, &rdev2->flags))
1554 d->state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11001555 if (test_bit(FailFast, &rdev2->flags))
1556 d->state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 /* now set the "removed" and "faulty" bits on any missing devices */
1559 for (i=0 ; i < mddev->raid_disks ; i++) {
1560 mdp_disk_t *d = &sb->disks[i];
1561 if (d->state == 0 && d->number == 0) {
1562 d->number = i;
1563 d->raid_disk = i;
1564 d->state = (1<<MD_DISK_REMOVED);
1565 d->state |= (1<<MD_DISK_FAULTY);
1566 failed++;
1567 }
1568 }
1569 sb->nr_disks = nr_disks;
1570 sb->active_disks = active;
1571 sb->working_disks = working;
1572 sb->failed_disks = failed;
1573 sb->spare_disks = spare;
1574
1575 sb->this_disk = sb->disks[rdev->desc_nr];
1576 sb->sb_csum = calc_sb_csum(sb);
1577}
1578
1579/*
Chris Webb0cd17fe2008-06-28 08:31:46 +10001580 * rdev_size_change for 0.90.0
1581 */
1582static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11001583super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001584{
Andre Noll58c0fed2009-03-31 14:33:13 +11001585 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001586 return 0; /* component must fit device */
NeilBrownc3d97142009-12-14 12:49:52 +11001587 if (rdev->mddev->bitmap_info.offset)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001588 return 0; /* can't move bitmap */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001589 rdev->sb_start = calc_dev_sboffset(rdev);
Andre Noll15f4a5f2008-07-21 14:42:12 +10001590 if (!num_sectors || num_sectors > rdev->sb_start)
1591 num_sectors = rdev->sb_start;
NeilBrown27a7b262011-09-10 17:21:28 +10001592 /* Limit to 4TB as metadata cannot record more than that.
1593 * 4TB == 2^32 KB, or 2*2^32 sectors.
1594 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001595 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001596 num_sectors = (sector_t)(2ULL << 32) - 2;
NeilBrown46533ff2016-11-18 16:16:11 +11001597 do {
1598 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001599 rdev->sb_page);
NeilBrown46533ff2016-11-18 16:16:11 +11001600 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11001601 return num_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10001602}
1603
NeilBrownc6563a82012-05-21 09:27:00 +10001604static int
1605super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1606{
1607 /* non-zero offset changes not possible with v0.90 */
1608 return new_offset == 0;
1609}
Chris Webb0cd17fe2008-06-28 08:31:46 +10001610
1611/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 * version 1 superblock
1613 */
1614
NeilBrownf72ffdd2014-09-30 14:23:59 +10001615static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616{
NeilBrown1c05b4b2006-10-21 10:24:08 -07001617 __le32 disk_csum;
1618 u32 csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 unsigned long long newcsum;
1620 int size = 256 + le32_to_cpu(sb->max_dev)*2;
NeilBrown1c05b4b2006-10-21 10:24:08 -07001621 __le32 *isuper = (__le32*)sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
1623 disk_csum = sb->sb_csum;
1624 sb->sb_csum = 0;
1625 newcsum = 0;
NeilBrown1f3c9902012-12-11 13:09:00 +11001626 for (; size >= 4; size -= 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 newcsum += le32_to_cpu(*isuper++);
1628
1629 if (size == 2)
NeilBrown1c05b4b2006-10-21 10:24:08 -07001630 newcsum += le16_to_cpu(*(__le16*) isuper);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631
1632 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1633 sb->sb_csum = disk_csum;
1634 return cpu_to_le32(csum);
1635}
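/*
 * The v1 checksum covers the 256-byte superblock plus two bytes per
 * dev_roles[] entry, summed as little-endian 32-bit words (with a trailing
 * 16-bit word when max_dev is odd) and folded from 64 to 32 bits.
 */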
1636
NeilBrown3cb03002011-10-11 16:45:26 +11001637static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638{
1639 struct mdp_superblock_1 *sb;
1640 int ret;
Andre Noll0f420352008-07-11 22:02:23 +10001641 sector_t sb_start;
NeilBrownc6563a82012-05-21 09:27:00 +10001642 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown0002b272005-09-09 16:23:53 -07001644 int bmask;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001645 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
1647 /*
Andre Noll0f420352008-07-11 22:02:23 +10001648 * Calculate the position of the superblock in 512-byte sectors.
 1649 * It is always aligned to a 4K boundary and
 1650 * depending on minor_version, it can be:
1651 * 0: At least 8K, but less than 12K, from end of device
1652 * 1: At start of device
1653 * 2: 4K from start of device.
1654 */
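	/*
	 * For illustration: with minor_version 0 on a 16777216-sector (8GiB)
	 * device, sb_start = (16777216 - 16) & ~7 = 16777200, i.e. 8K from
	 * the end of the device and 4K-aligned.
	 */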
1655 switch(minor_version) {
1656 case 0:
Mike Snitzer77304d22010-11-08 14:39:12 +01001657 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll0f420352008-07-11 22:02:23 +10001658 sb_start -= 8*2;
1659 sb_start &= ~(sector_t)(4*2-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 break;
1661 case 1:
Andre Noll0f420352008-07-11 22:02:23 +10001662 sb_start = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 break;
1664 case 2:
Andre Noll0f420352008-07-11 22:02:23 +10001665 sb_start = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 break;
1667 default:
1668 return -EINVAL;
1669 }
Andre Noll0f420352008-07-11 22:02:23 +10001670 rdev->sb_start = sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
NeilBrown0002b272005-09-09 16:23:53 -07001672 /* superblock is rarely larger than 1K, but it can be larger,
1673 * and it is safe to read 4k, so we do that
1674 */
1675 ret = read_disk_sb(rdev, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 if (ret) return ret;
1677
Namhyung Kim65a06f062011-07-27 11:00:36 +10001678 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679
1680 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1681 sb->major_version != cpu_to_le32(1) ||
1682 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
Andre Noll0f420352008-07-11 22:02:23 +10001683 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
NeilBrown71c08052005-09-09 16:23:51 -07001684 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 return -EINVAL;
1686
1687 if (calc_sb_1_csum(sb) != sb->sb_csum) {
NeilBrown9d487392016-11-02 14:16:49 +11001688 pr_warn("md: invalid superblock checksum on %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 bdevname(rdev->bdev,b));
1690 return -EINVAL;
1691 }
1692 if (le64_to_cpu(sb->data_size) < 10) {
NeilBrown9d487392016-11-02 14:16:49 +11001693 pr_warn("md: data_size too small on %s\n",
1694 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 return -EINVAL;
1696 }
NeilBrownc6563a82012-05-21 09:27:00 +10001697 if (sb->pad0 ||
1698 sb->pad3[0] ||
1699 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1700 /* Some padding is non-zero, might be a new feature */
1701 return -EINVAL;
NeilBrowne11e93f2007-05-09 02:35:36 -07001702
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 rdev->preferred_minor = 0xffff;
1704 rdev->data_offset = le64_to_cpu(sb->data_offset);
NeilBrownc6563a82012-05-21 09:27:00 +10001705 rdev->new_data_offset = rdev->data_offset;
1706 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1707 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1708 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
NeilBrown4dbcdc72006-01-06 00:20:52 -08001709 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
NeilBrown0002b272005-09-09 16:23:53 -07001711 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
Martin K. Petersene1defc42009-05-22 17:17:49 -04001712 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
NeilBrown0002b272005-09-09 16:23:53 -07001713 if (rdev->sb_size & bmask)
NeilBrowna1801f82008-03-04 14:29:31 -08001714 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1715
1716 if (minor_version
Andre Noll0f420352008-07-11 22:02:23 +10001717 && rdev->data_offset < sb_start + (rdev->sb_size/512))
NeilBrowna1801f82008-03-04 14:29:31 -08001718 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10001719 if (minor_version
1720 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1721 return -EINVAL;
NeilBrown0002b272005-09-09 16:23:53 -07001722
NeilBrown31b65a02006-07-10 04:44:14 -07001723 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1724 rdev->desc_nr = -1;
1725 else
1726 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1727
NeilBrown2699b672011-07-28 11:31:47 +10001728 if (!rdev->bb_page) {
1729 rdev->bb_page = alloc_page(GFP_KERNEL);
1730 if (!rdev->bb_page)
1731 return -ENOMEM;
1732 }
1733 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1734 rdev->badblocks.count == 0) {
1735 /* need to load the bad block list.
1736 * Currently we limit it to one page.
1737 */
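		/*
		 * Each on-disk entry packs the start sector in the upper
		 * 54 bits and the length in the low 10 bits, both scaled by
		 * bblog_shift; an all-ones entry terminates the list.
		 */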
1738 s32 offset;
1739 sector_t bb_sector;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001740 __le64 *bbp;
NeilBrown2699b672011-07-28 11:31:47 +10001741 int i;
1742 int sectors = le16_to_cpu(sb->bblog_size);
1743 if (sectors > (PAGE_SIZE / 512))
1744 return -EINVAL;
1745 offset = le32_to_cpu(sb->bblog_offset);
1746 if (offset == 0)
1747 return -EINVAL;
1748 bb_sector = (long long)offset;
1749 if (!sync_page_io(rdev, bb_sector, sectors << 9,
Mike Christie796a5cf2016-06-05 14:32:07 -05001750 rdev->bb_page, REQ_OP_READ, 0, true))
NeilBrown2699b672011-07-28 11:31:47 +10001751 return -EIO;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001752 bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10001753 rdev->badblocks.shift = sb->bblog_shift;
1754 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1755 u64 bb = le64_to_cpu(*bbp);
1756 int count = bb & (0x3ff);
1757 u64 sector = bb >> 10;
1758 sector <<= sb->bblog_shift;
1759 count <<= sb->bblog_shift;
1760 if (bb + 1 == 0)
1761 break;
Vishal Vermafc974ee2015-12-24 19:20:34 -07001762 if (badblocks_set(&rdev->badblocks, sector, count, 1))
NeilBrown2699b672011-07-28 11:31:47 +10001763 return -EINVAL;
1764 }
NeilBrown486adf72013-04-24 11:42:44 +10001765 } else if (sb->bblog_offset != 0)
1766 rdev->badblocks.shift = 0;
NeilBrown2699b672011-07-28 11:31:47 +10001767
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001768 if ((le32_to_cpu(sb->feature_map) &
1769 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001770 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1771 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1772 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1773 }
1774
NeilBrown33f2c352019-09-09 16:52:29 +10001775 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1776 sb->level != 0)
1777 return -EINVAL;
1778
Yufen Yu228fc7d2019-10-30 18:47:02 +08001779 /* not spare disk, or LEVEL_MULTIPATH */
1780 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1781 (rdev->desc_nr >= 0 &&
1782 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1783 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1784 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1785 spare_disk = false;
Yufen Yu6a5cb532019-10-16 16:00:03 +08001786
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001787 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001788 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001789 ret = 1;
1790 else
1791 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001792 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001794 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795
1796 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1797 sb->level != refsb->level ||
1798 sb->layout != refsb->layout ||
1799 sb->chunksize != refsb->chunksize) {
NeilBrown9d487392016-11-02 14:16:49 +11001800 pr_warn("md: %s has strangely different superblock to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 bdevname(rdev->bdev,b),
1802 bdevname(refdev->bdev,b2));
1803 return -EINVAL;
1804 }
1805 ev1 = le64_to_cpu(sb->events);
1806 ev2 = le64_to_cpu(refsb->events);
1807
Yufen Yu228fc7d2019-10-30 18:47:02 +08001808 if (!spare_disk && ev1 > ev2)
NeilBrown8ed75462006-02-03 03:03:41 -08001809 ret = 1;
1810 else
1811 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 }
NeilBrownc6563a82012-05-21 09:27:00 +10001813 if (minor_version) {
1814 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1815 sectors -= rdev->data_offset;
1816 } else
1817 sectors = rdev->sb_start;
1818 if (sectors < le64_to_cpu(sb->data_size))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 return -EINVAL;
Andre Nolldd8ac332009-03-31 14:33:13 +11001820 rdev->sectors = le64_to_cpu(sb->data_size);
NeilBrown8ed75462006-02-03 03:03:41 -08001821 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822}
1823
NeilBrownfd01b882011-10-11 16:47:53 +11001824static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825{
Namhyung Kim65a06f062011-07-27 11:00:36 +10001826 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001827 __u64 ev1 = le64_to_cpu(sb->events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
NeilBrown41158c72005-06-21 17:17:25 -07001829 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001830 clear_bit(Faulty, &rdev->flags);
1831 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001832 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001833 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001834
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 if (mddev->raid_disks == 0) {
1836 mddev->major_version = 1;
1837 mddev->patch_version = 0;
NeilBrowne6910632008-02-06 01:39:51 -08001838 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001839 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001840 mddev->ctime = le64_to_cpu(sb->ctime);
1841 mddev->utime = le64_to_cpu(sb->utime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 mddev->level = le32_to_cpu(sb->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08001843 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 mddev->layout = le32_to_cpu(sb->layout);
1845 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001846 mddev->dev_sectors = le64_to_cpu(sb->size);
NeilBrown07d84d102006-06-26 00:27:56 -07001847 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001848 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001849 mddev->bitmap_info.space = 0;
1850 /* Default location for bitmap is 1K after superblock
1851 * using 3K - total of 4K
1852 */
NeilBrownc3d97142009-12-14 12:49:52 +11001853 mddev->bitmap_info.default_offset = 1024 >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001854 mddev->bitmap_info.default_space = (4096-1024) >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001855 mddev->reshape_backwards = 0;
1856
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1858 memcpy(mddev->uuid, sb->set_uuid, 16);
1859
1860 mddev->max_disks = (4096-256)/2;
NeilBrowna654b9d82005-06-21 17:17:27 -07001861
NeilBrown71c08052005-09-09 16:23:51 -07001862 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001863 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001864 mddev->bitmap_info.offset =
1865 (__s32)le32_to_cpu(sb->bitmap_offset);
NeilBrown6409bb02012-05-22 13:55:07 +10001866 /* Metadata doesn't record how much space is available.
1867 * For 1.0, we assume we can use up to the superblock
 1868 * if the bitmap is before it, else up to 4K beyond the superblock.
1869 * For others, assume no change is possible.
1870 */
1871 if (mddev->minor_version > 0)
1872 mddev->bitmap_info.space = 0;
1873 else if (mddev->bitmap_info.offset > 0)
1874 mddev->bitmap_info.space =
1875 8 - mddev->bitmap_info.offset;
1876 else
1877 mddev->bitmap_info.space =
1878 -mddev->bitmap_info.offset;
1879 }
NeilBrowne11e93f2007-05-09 02:35:36 -07001880
NeilBrownf6705572006-03-27 01:18:11 -08001881 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1882 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1883 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1884 mddev->new_level = le32_to_cpu(sb->new_level);
1885 mddev->new_layout = le32_to_cpu(sb->new_layout);
Andre Noll664e7c42009-06-18 08:45:27 +10001886 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
NeilBrown2c810cd2012-05-21 09:27:00 +10001887 if (mddev->delta_disks < 0 ||
1888 (mddev->delta_disks == 0 &&
1889 (le32_to_cpu(sb->feature_map)
1890 & MD_FEATURE_RESHAPE_BACKWARDS)))
1891 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001892 } else {
1893 mddev->reshape_position = MaxSector;
1894 mddev->delta_disks = 0;
1895 mddev->new_level = mddev->level;
1896 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001897 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001898 }
1899
NeilBrown33f2c352019-09-09 16:52:29 +10001900 if (mddev->level == 0 &&
1901 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1902 mddev->layout = -1;
1903
Song Liu486b0f72016-08-19 15:34:01 -07001904 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
Shaohua Lia62ab492016-01-06 14:37:13 -08001905 set_bit(MD_HAS_JOURNAL, &mddev->flags);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001906
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001907 if (le32_to_cpu(sb->feature_map) &
1908 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001909 if (le32_to_cpu(sb->feature_map) &
1910 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1911 return -EINVAL;
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001912 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1913 (le32_to_cpu(sb->feature_map) &
1914 MD_FEATURE_MULTIPLE_PPLS))
1915 return -EINVAL;
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001916 set_bit(MD_HAS_PPL, &mddev->flags);
1917 }
NeilBrown41158c72005-06-21 17:17:25 -07001918 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001919 /* Insist on good event counter while assembling, except for
1920 * spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001922 if (rdev->desc_nr >= 0 &&
1923 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
Song Liua3dfbda2015-10-08 21:54:11 -07001924 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1925 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
NeilBrownbe6800a2010-05-18 10:17:09 +10001926 if (ev1 < mddev->events)
1927 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001928 } else if (mddev->bitmap) {
1929 /* If adding to array with a bitmap, then we can accept an
1930 * older device, but not too old.
1931 */
NeilBrown41158c72005-06-21 17:17:25 -07001932 if (ev1 < mddev->bitmap->events_cleared)
1933 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001934 if (ev1 < mddev->events)
1935 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001936 } else {
1937 if (ev1 < mddev->events)
1938 /* just a hot-add of a new device, leave raid_disk at -1 */
1939 return 0;
1940 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 if (mddev->level != LEVEL_MULTIPATH) {
1942 int role;
NeilBrown3673f302009-08-03 10:59:56 +10001943 if (rdev->desc_nr < 0 ||
1944 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
Song Liuc4d4c912015-08-13 14:31:54 -07001945 role = MD_DISK_ROLE_SPARE;
NeilBrown3673f302009-08-03 10:59:56 +10001946 rdev->desc_nr = -1;
1947 } else
1948 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 switch(role) {
Song Liuc4d4c912015-08-13 14:31:54 -07001950 case MD_DISK_ROLE_SPARE: /* spare */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 break;
Song Liuc4d4c912015-08-13 14:31:54 -07001952 case MD_DISK_ROLE_FAULTY: /* faulty */
NeilBrownb2d444d2005-11-08 21:39:31 -08001953 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954 break;
Song Liubac624f2015-08-13 14:31:55 -07001955 case MD_DISK_ROLE_JOURNAL: /* journal device */
1956 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1957 /* journal device without journal feature */
NeilBrown9d487392016-11-02 14:16:49 +11001958 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
Song Liubac624f2015-08-13 14:31:55 -07001959 return -EINVAL;
1960 }
1961 set_bit(Journal, &rdev->flags);
Shaohua Li3069aa82015-08-13 14:31:56 -07001962 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
Shaohua Li9b156032015-12-18 15:19:16 +11001963 rdev->raid_disk = 0;
Song Liubac624f2015-08-13 14:31:55 -07001964 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 default:
NeilBrownf4667222013-12-09 12:04:56 +11001966 rdev->saved_raid_disk = role;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001967 if ((le32_to_cpu(sb->feature_map) &
NeilBrownf4667222013-12-09 12:04:56 +11001968 MD_FEATURE_RECOVERY_OFFSET)) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001969 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11001970 if (!(le32_to_cpu(sb->feature_map) &
1971 MD_FEATURE_RECOVERY_BITMAP))
1972 rdev->saved_raid_disk = -1;
Guoqing Jiang062f5b2a2019-07-24 11:09:20 +02001973 } else {
1974 /*
1975 * If the array is FROZEN, then the device can't
1976 * be in_sync with rest of array.
1977 */
1978 if (!test_bit(MD_RECOVERY_FROZEN,
1979 &mddev->recovery))
1980 set_bit(In_sync, &rdev->flags);
1981 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 rdev->raid_disk = role;
1983 break;
1984 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001985 if (sb->devflags & WriteMostly1)
1986 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001987 if (sb->devflags & FailFast1)
1988 set_bit(FailFast, &rdev->flags);
NeilBrown2d78f8c2011-12-23 10:17:51 +11001989 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1990 set_bit(Replacement, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001991 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001992 set_bit(In_sync, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001993
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 return 0;
1995}
1996
NeilBrownfd01b882011-10-11 16:47:53 +11001997static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998{
1999 struct mdp_superblock_1 *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11002000 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 int max_dev, i;
2002 /* make rdev->sb match mddev and rdev data. */
2003
Namhyung Kim65a06f062011-07-27 11:00:36 +10002004 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
2006 sb->feature_map = 0;
2007 sb->pad0 = 0;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002008 sb->recovery_offset = cpu_to_le64(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 memset(sb->pad3, 0, sizeof(sb->pad3));
2010
2011 sb->utime = cpu_to_le64((__u64)mddev->utime);
2012 sb->events = cpu_to_le64(mddev->events);
2013 if (mddev->in_sync)
2014 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
Shaohua Libd18f642015-09-02 13:49:50 -07002015 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2016 sb->resync_offset = cpu_to_le64(MaxSector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017 else
2018 sb->resync_offset = cpu_to_le64(0);
2019
NeilBrown1c05b4b2006-10-21 10:24:08 -07002020 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
NeilBrown4dbcdc72006-01-06 00:20:52 -08002021
NeilBrownf0ca3402006-02-02 14:28:04 -08002022 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11002023 sb->size = cpu_to_le64(mddev->dev_sectors);
Andre Noll9d8f0362009-06-18 08:45:01 +10002024 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
NeilBrown62e1e382009-05-26 09:40:59 +10002025 sb->level = cpu_to_le32(mddev->level);
2026 sb->layout = cpu_to_le32(mddev->layout);
NeilBrown688834e2016-11-18 16:16:11 +11002027 if (test_bit(FailFast, &rdev->flags))
2028 sb->devflags |= FailFast1;
2029 else
2030 sb->devflags &= ~FailFast1;
NeilBrownf0ca3402006-02-02 14:28:04 -08002031
NeilBrownaeb9b2112011-08-25 14:43:08 +10002032 if (test_bit(WriteMostly, &rdev->flags))
2033 sb->devflags |= WriteMostly1;
2034 else
2035 sb->devflags &= ~WriteMostly1;
NeilBrownc6563a82012-05-21 09:27:00 +10002036 sb->data_offset = cpu_to_le64(rdev->data_offset);
2037 sb->data_size = cpu_to_le64(rdev->sectors);
NeilBrownaeb9b2112011-08-25 14:43:08 +10002038
NeilBrownc3d97142009-12-14 12:49:52 +11002039 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2040 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
NeilBrown71c08052005-09-09 16:23:51 -07002041 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
NeilBrowna654b9d82005-06-21 17:17:27 -07002042 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002043
Shaohua Lif2076e72015-10-08 21:54:12 -07002044 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
NeilBrown97e4f422009-03-31 14:33:13 +11002045 !test_bit(In_sync, &rdev->flags)) {
NeilBrown93be75f2009-12-14 12:50:06 +11002046 sb->feature_map |=
2047 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2048 sb->recovery_offset =
2049 cpu_to_le64(rdev->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11002050 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2051 sb->feature_map |=
2052 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002053 }
Shaohua Li3069aa82015-08-13 14:31:56 -07002054 /* Note: recovery_offset and journal_tail share space */
2055 if (test_bit(Journal, &rdev->flags))
2056 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002057 if (test_bit(Replacement, &rdev->flags))
2058 sb->feature_map |=
2059 cpu_to_le32(MD_FEATURE_REPLACEMENT);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002060
NeilBrownf6705572006-03-27 01:18:11 -08002061 if (mddev->reshape_position != MaxSector) {
2062 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2063 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2064 sb->new_layout = cpu_to_le32(mddev->new_layout);
2065 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2066 sb->new_level = cpu_to_le32(mddev->new_level);
Andre Noll664e7c42009-06-18 08:45:27 +10002067 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
NeilBrown2c810cd2012-05-21 09:27:00 +10002068 if (mddev->delta_disks == 0 &&
2069 mddev->reshape_backwards)
2070 sb->feature_map
2071 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
NeilBrownc6563a82012-05-21 09:27:00 +10002072 if (rdev->new_data_offset != rdev->data_offset) {
2073 sb->feature_map
2074 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2075 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2076 - rdev->data_offset));
2077 }
NeilBrownf6705572006-03-27 01:18:11 -08002078 }
NeilBrowna654b9d82005-06-21 17:17:27 -07002079
Goldwyn Rodrigues3c462c82015-08-19 07:35:54 +10002080 if (mddev_is_clustered(mddev))
2081 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2082
NeilBrown2699b672011-07-28 11:31:47 +10002083 if (rdev->badblocks.count == 0)
 2084 /* Nothing to do for bad blocks */ ;
2085 else if (sb->bblog_offset == 0)
2086 /* Cannot record bad blocks on this device */
2087 md_error(mddev, rdev);
2088 else {
2089 struct badblocks *bb = &rdev->badblocks;
Christoph Hellwigae506402019-04-04 18:56:13 +02002090 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10002091 u64 *p = bb->page;
2092 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2093 if (bb->changed) {
2094 unsigned seq;
2095
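			/*
			 * Copy the in-memory bad block list to the on-disk
			 * format under a seqlock read section; if it changed
			 * concurrently, read_seqretry() triggers a re-copy.
			 */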
2096retry:
2097 seq = read_seqbegin(&bb->lock);
2098
2099 memset(bbp, 0xff, PAGE_SIZE);
2100
2101 for (i = 0 ; i < bb->count ; i++) {
majianpeng35f9ac22012-11-08 08:56:27 +08002102 u64 internal_bb = p[i];
NeilBrown2699b672011-07-28 11:31:47 +10002103 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2104 | BB_LEN(internal_bb));
majianpeng35f9ac22012-11-08 08:56:27 +08002105 bbp[i] = cpu_to_le64(store_bb);
NeilBrown2699b672011-07-28 11:31:47 +10002106 }
NeilBrownd0962932012-03-19 12:46:41 +11002107 bb->changed = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002108 if (read_seqretry(&bb->lock, seq))
2109 goto retry;
2110
2111 bb->sector = (rdev->sb_start +
2112 (int)le32_to_cpu(sb->bblog_offset));
2113 bb->size = le16_to_cpu(sb->bblog_size);
NeilBrown2699b672011-07-28 11:31:47 +10002114 }
2115 }
2116
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117 max_dev = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11002118 rdev_for_each(rdev2, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 if (rdev2->desc_nr+1 > max_dev)
2120 max_dev = rdev2->desc_nr+1;
NeilBrowna778b732007-05-23 13:58:10 -07002121
NeilBrown70471da2009-08-03 10:59:57 +10002122 if (max_dev > le32_to_cpu(sb->max_dev)) {
2123 int bmask;
NeilBrowna778b732007-05-23 13:58:10 -07002124 sb->max_dev = cpu_to_le32(max_dev);
NeilBrown70471da2009-08-03 10:59:57 +10002125 rdev->sb_size = max_dev * 2 + 256;
2126 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2127 if (rdev->sb_size & bmask)
2128 rdev->sb_size = (rdev->sb_size | bmask) + 1;
NeilBrownddcf3522010-09-08 16:48:17 +10002129 } else
2130 max_dev = le32_to_cpu(sb->max_dev);
2131
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 for (i=0; i<max_dev;i++)
Lidong Zhong8df72022017-06-12 10:45:55 +08002133 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
NeilBrownf72ffdd2014-09-30 14:23:59 +10002134
Song Liua97b7892015-10-08 21:54:09 -07002135 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2136 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002138 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
Pawel Baldysiakddc08822017-08-16 17:13:45 +02002139 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2140 sb->feature_map |=
2141 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2142 else
2143 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002144 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2145 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2146 }
2147
NeilBrowndafb20f2012-03-19 12:46:39 +11002148 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 i = rdev2->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08002150 if (test_bit(Faulty, &rdev2->flags))
Song Liuc4d4c912015-08-13 14:31:54 -07002151 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08002152 else if (test_bit(In_sync, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Song Liua97b7892015-10-08 21:54:09 -07002154 else if (test_bit(Journal, &rdev2->flags))
Song Liubac624f2015-08-13 14:31:55 -07002155 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
NeilBrown93be75f2009-12-14 12:50:06 +11002156 else if (rdev2->raid_disk >= 0)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002157 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 else
Song Liuc4d4c912015-08-13 14:31:54 -07002159 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 }
2161
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 sb->sb_csum = calc_sb_1_csum(sb);
2163}
2164
Xiao Nid9c0fa52020-06-30 15:55:36 +08002165static sector_t super_1_choose_bm_space(sector_t dev_size)
2166{
2167 sector_t bm_space;
2168
2169 /* if the device is bigger than 8Gig, save 64k for bitmap
2170 * usage, if bigger than 200Gig, save 128k
2171 */
2172 if (dev_size < 64*2)
2173 bm_space = 0;
2174 else if (dev_size - 64*2 >= 200*1024*1024*2)
2175 bm_space = 128*2;
2176 else if (dev_size - 4*2 > 8*1024*1024*2)
2177 bm_space = 64*2;
2178 else
2179 bm_space = 4*2;
2180 return bm_space;
2181}
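/*
 * Worked examples (sizes in 512-byte sectors): a 1TiB component
 * (2147483648 sectors) reserves 128K (256 sectors), a 100GiB one reserves
 * 64K (128 sectors), and a 4GiB one reserves 4K (8 sectors).
 */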
2182
Chris Webb0cd17fe2008-06-28 08:31:46 +10002183static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11002184super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002185{
2186 struct mdp_superblock_1 *sb;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002187 sector_t max_sectors;
Andre Noll58c0fed2009-03-31 14:33:13 +11002188 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002189 return 0; /* component must fit device */
NeilBrownc6563a82012-05-21 09:27:00 +10002190 if (rdev->data_offset != rdev->new_data_offset)
2191 return 0; /* too confusing */
Andre Noll0f420352008-07-11 22:02:23 +10002192 if (rdev->sb_start < rdev->data_offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002193 /* minor versions 1 and 2; superblock before data */
Mike Snitzer77304d22010-11-08 14:39:12 +01002194 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002195 max_sectors -= rdev->data_offset;
2196 if (!num_sectors || num_sectors > max_sectors)
2197 num_sectors = max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11002198 } else if (rdev->mddev->bitmap_info.offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002199 /* minor version 0 with bitmap we can't move */
2200 return 0;
2201 } else {
2202 /* minor version 0; superblock after data */
Xiao Nid9c0fa52020-06-30 15:55:36 +08002203 sector_t sb_start, bm_space;
2204 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2205
2206 /* 8K is for superblock */
2207 sb_start = dev_size - 8*2;
Andre Noll0f420352008-07-11 22:02:23 +10002208 sb_start &= ~(sector_t)(4*2 - 1);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002209
2210 bm_space = super_1_choose_bm_space(dev_size);
2211
2212 /* Space that can be used to store date needs to decrease
2213 * superblock bitmap space and bad block space(4K)
2214 */
2215 max_sectors = sb_start - bm_space - 4*2;
2216
Andre Noll15f4a5f2008-07-21 14:42:12 +10002217 if (!num_sectors || num_sectors > max_sectors)
2218 num_sectors = max_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002219 }
Namhyung Kim65a06f062011-07-27 11:00:36 +10002220 sb = page_address(rdev->sb_page);
Andre Noll15f4a5f2008-07-21 14:42:12 +10002221 sb->data_size = cpu_to_le64(num_sectors);
Jason Yan3fb632e2017-03-10 11:27:23 +08002222 sb->super_offset = cpu_to_le64(rdev->sb_start);
Chris Webb0cd17fe2008-06-28 08:31:46 +10002223 sb->sb_csum = calc_sb_1_csum(sb);
NeilBrown46533ff2016-11-18 16:16:11 +11002224 do {
2225 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2226 rdev->sb_page);
2227 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11002228 return num_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10002229
2230}
2231
2232static int
2233super_1_allow_new_offset(struct md_rdev *rdev,
2234 unsigned long long new_offset)
2235{
2236 /* All necessary checks on new >= old have been done */
2237 struct bitmap *bitmap;
2238 if (new_offset >= rdev->data_offset)
2239 return 1;
2240
2241 /* with 1.0 metadata, there is no metadata to tread on
2242 * so we can always move back */
2243 if (rdev->mddev->minor_version == 0)
2244 return 1;
2245
2246 /* otherwise we must be sure not to step on
2247 * any metadata, so stay:
2248 * 36K beyond start of superblock
2249 * beyond end of badblocks
2250 * beyond write-intent bitmap
2251 */
2252 if (rdev->sb_start + (32+4)*2 > new_offset)
2253 return 0;
2254 bitmap = rdev->mddev->bitmap;
2255 if (bitmap && !rdev->mddev->bitmap_info.file &&
2256 rdev->sb_start + rdev->mddev->bitmap_info.offset +
NeilBrown1ec885c2012-05-22 13:55:10 +10002257 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
NeilBrownc6563a82012-05-21 09:27:00 +10002258 return 0;
2259 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2260 return 0;
2261
2262 return 1;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002263}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
Adrian Bunk75c96f82005-05-05 16:16:09 -07002265static struct super_type super_types[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 [0] = {
2267 .name = "0.90.0",
2268 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002269 .load_super = super_90_load,
2270 .validate_super = super_90_validate,
2271 .sync_super = super_90_sync,
2272 .rdev_size_change = super_90_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002273 .allow_new_offset = super_90_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 },
2275 [1] = {
2276 .name = "md-1",
2277 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002278 .load_super = super_1_load,
2279 .validate_super = super_1_validate,
2280 .sync_super = super_1_sync,
2281 .rdev_size_change = super_1_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002282 .allow_new_offset = super_1_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 },
2284};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
NeilBrownfd01b882011-10-11 16:47:53 +11002286static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
Jonathan Brassow076f9682011-06-07 17:51:30 -05002287{
2288 if (mddev->sync_super) {
2289 mddev->sync_super(mddev, rdev);
2290 return;
2291 }
2292
2293 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2294
2295 super_types[mddev->major_version].sync_super(mddev, rdev);
2296}
2297
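/*
 * Return 1 if the two arrays share any active component device, i.e. the
 * same gendisk appears (non-faulty, non-journal, with a raid_disk assigned)
 * in both rdev lists; otherwise return 0.
 */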
NeilBrownfd01b882011-10-11 16:47:53 +11002298static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299{
NeilBrown3cb03002011-10-11 16:45:26 +11002300 struct md_rdev *rdev, *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
NeilBrown4b809912008-07-21 17:05:25 +10002302 rcu_read_lock();
Song Liu0b020e82015-09-03 23:00:35 -07002303 rdev_for_each_rcu(rdev, mddev1) {
2304 if (test_bit(Faulty, &rdev->flags) ||
2305 test_bit(Journal, &rdev->flags) ||
2306 rdev->raid_disk == -1)
2307 continue;
2308 rdev_for_each_rcu(rdev2, mddev2) {
2309 if (test_bit(Faulty, &rdev2->flags) ||
2310 test_bit(Journal, &rdev2->flags) ||
2311 rdev2->raid_disk == -1)
2312 continue;
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02002313 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
NeilBrown4b809912008-07-21 17:05:25 +10002314 rcu_read_unlock();
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002315 return 1;
NeilBrown4b809912008-07-21 17:05:25 +10002316 }
Song Liu0b020e82015-09-03 23:00:35 -07002317 }
2318 }
NeilBrown4b809912008-07-21 17:05:25 +10002319 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002320 return 0;
2321}
2322
2323static LIST_HEAD(pending_raid_disks);
2324
Andre Nollac5e7112009-08-03 10:59:47 +10002325/*
2326 * Try to register data integrity profile for an mddev
2327 *
2328 * This is called when an array is started and after a disk has been kicked
2329 * from the array. It only succeeds if all working and active component devices
2330 * are integrity capable with matching profiles.
2331 */
NeilBrownfd01b882011-10-11 16:47:53 +11002332int md_integrity_register(struct mddev *mddev)
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002333{
NeilBrown3cb03002011-10-11 16:45:26 +11002334 struct md_rdev *rdev, *reference = NULL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002335
Andre Nollac5e7112009-08-03 10:59:47 +10002336 if (list_empty(&mddev->disks))
2337 return 0; /* nothing to do */
Jonathan Brassow629acb62011-06-08 15:10:08 +10002338 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2339 return 0; /* shouldn't register, or already is */
NeilBrowndafb20f2012-03-19 12:46:39 +11002340 rdev_for_each(rdev, mddev) {
Andre Nollac5e7112009-08-03 10:59:47 +10002341 /* skip spares and non-functional disks */
2342 if (test_bit(Faulty, &rdev->flags))
2343 continue;
2344 if (rdev->raid_disk < 0)
2345 continue;
Andre Nollac5e7112009-08-03 10:59:47 +10002346 if (!reference) {
2347 /* Use the first rdev as the reference */
2348 reference = rdev;
2349 continue;
2350 }
2351 /* does this rdev's profile match the reference profile? */
2352 if (blk_integrity_compare(reference->bdev->bd_disk,
2353 rdev->bdev->bd_disk) < 0)
2354 return -EINVAL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002355 }
Martin K. Petersen89078d52011-03-28 20:09:12 -04002356 if (!reference || !bdev_get_integrity(reference->bdev))
2357 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002358 /*
2359 * All component devices are integrity capable and have matching
2360 * profiles, register the common profile for the md device.
2361 */
Martin K. Petersen25520d52015-10-21 13:19:49 -04002362 blk_integrity_register(mddev->gendisk,
2363 bdev_get_integrity(reference->bdev));
2364
NeilBrown9d487392016-11-02 14:16:49 +11002365 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
Kent Overstreetafeee512018-05-20 18:25:52 -04002366 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
NeilBrown9d487392016-11-02 14:16:49 +11002367 pr_err("md: failed to create integrity pool for %s\n",
Martin K. Petersena91a2782011-03-17 11:11:05 +01002368 mdname(mddev));
2369 return -EINVAL;
2370 }
Andre Nollac5e7112009-08-03 10:59:47 +10002371 return 0;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002372}
Andre Nollac5e7112009-08-03 10:59:47 +10002373EXPORT_SYMBOL(md_integrity_register);
2374
Dan Williams1501efa2016-01-13 16:00:07 -08002375/*
2376 * Attempt to add an rdev, but only if it is consistent with the current
2377 * integrity profile
2378 */
2379int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
Andre Nollac5e7112009-08-03 10:59:47 +10002380{
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002381 struct blk_integrity *bi_mddev;
Dan Williams1501efa2016-01-13 16:00:07 -08002382 char name[BDEVNAME_SIZE];
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002383
2384 if (!mddev->gendisk)
Dan Williams1501efa2016-01-13 16:00:07 -08002385 return 0;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002386
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002387 bi_mddev = blk_get_integrity(mddev->gendisk);
Andre Nollac5e7112009-08-03 10:59:47 +10002388
2389 if (!bi_mddev) /* nothing to do */
Dan Williams1501efa2016-01-13 16:00:07 -08002390 return 0;
2391
2392 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11002393 pr_err("%s: incompatible integrity profile for %s\n",
2394 mdname(mddev), bdevname(rdev->bdev, name));
Dan Williams1501efa2016-01-13 16:00:07 -08002395 return -ENXIO;
2396 }
2397
2398 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002399}
2400EXPORT_SYMBOL(md_integrity_add_rdev);
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002401
NeilBrownf72ffdd2014-09-30 14:23:59 +10002402static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403{
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002404 char b[BDEVNAME_SIZE];
NeilBrown5e55e2f2007-03-26 21:32:14 -08002405 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406
Dan Williams11e2ede2008-04-30 00:52:32 -07002407 /* prevent duplicates */
2408 if (find_rdev(mddev, rdev->bdev->bd_dev))
2409 return -EEXIST;
2410
NeilBrown97b20ef2017-04-13 08:53:48 +10002411 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2412 mddev->pers)
2413 return -EROFS;
2414
Andre Nolldd8ac332009-03-31 14:33:13 +11002415 /* make sure rdev->sectors exceeds mddev->dev_sectors */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002416 if (!test_bit(Journal, &rdev->flags) &&
2417 rdev->sectors &&
2418 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
NeilBrowna778b732007-05-23 13:58:10 -07002419 if (mddev->pers) {
2420 /* Cannot change size, so fail
2421 * If mddev->level <= 0, then we don't care
2422 * about aligning sizes (e.g. linear)
2423 */
2424 if (mddev->level > 0)
2425 return -ENOSPC;
2426 } else
Andre Nolldd8ac332009-03-31 14:33:13 +11002427 mddev->dev_sectors = rdev->sectors;
NeilBrown2bf071b2006-01-06 00:20:55 -08002428 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429
2430 /* Verify rdev->desc_nr is unique.
2431 * If it is -1, assign a free number, else
2432 * check number is not in use
2433 */
NeilBrown4878e9e2014-09-25 17:00:11 +10002434 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 if (rdev->desc_nr < 0) {
2436 int choice = 0;
NeilBrown4878e9e2014-09-25 17:00:11 +10002437 if (mddev->pers)
2438 choice = mddev->raid_disks;
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002439 while (md_find_rdev_nr_rcu(mddev, choice))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 choice++;
2441 rdev->desc_nr = choice;
2442 } else {
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002443 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
NeilBrown4878e9e2014-09-25 17:00:11 +10002444 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 return -EBUSY;
NeilBrown4878e9e2014-09-25 17:00:11 +10002446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 }
NeilBrown4878e9e2014-09-25 17:00:11 +10002448 rcu_read_unlock();
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002449 if (!test_bit(Journal, &rdev->flags) &&
2450 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11002451 pr_warn("md: %s: array is limited to %d devices\n",
2452 mdname(mddev), mddev->max_disks);
NeilBrownde01dfa2009-02-06 18:02:46 +11002453 return -EBUSY;
2454 }
NeilBrown19133a42005-11-08 21:39:35 -08002455 bdevname(rdev->bdev,b);
Rasmus Villemoes90a9bef2015-06-25 15:02:36 -07002456 strreplace(b, '/', '!');
Greg Kroah-Hartman649316b2007-12-17 23:05:35 -07002457
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 rdev->mddev = mddev;
NeilBrown9d487392016-11-02 14:16:49 +11002459 pr_debug("md: bind<%s>\n", b);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002460
Guoqing Jiang963c5552019-06-14 17:10:36 +08002461 if (mddev->raid_disks)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002462 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang963c5552019-06-14 17:10:36 +08002463
Greg Kroah-Hartmanb2d6db52007-12-17 23:05:35 -07002464 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
NeilBrown5e55e2f2007-03-26 21:32:14 -08002465 goto fail;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002466
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09002467 /* failure here is OK */
Christoph Hellwig8d652692020-11-17 08:18:55 +01002468 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
NeilBrown00bcb4a2010-06-01 19:37:23 +10002469 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07002470 rdev->sysfs_unack_badblocks =
2471 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2472 rdev->sysfs_badblocks =
2473 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
NeilBrown3c0ee632008-10-21 13:25:28 +11002474
NeilBrown4b809912008-07-21 17:05:25 +10002475 list_add_rcu(&rdev->same_set, &mddev->disks);
Tejun Heoe09b4572010-11-13 11:55:17 +01002476 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
NeilBrown4044ba52009-01-09 08:31:11 +11002477
2478 /* May as well allow recovery to be retried once */
NeilBrown53890422011-07-27 11:00:36 +10002479 mddev->recovery_disabled++;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002480
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 return 0;
NeilBrown5e55e2f2007-03-26 21:32:14 -08002482
2483 fail:
NeilBrown9d487392016-11-02 14:16:49 +11002484 pr_warn("md: failed to register dev-%s for %s\n",
2485 b, mdname(mddev));
NeilBrown5e55e2f2007-03-26 21:32:14 -08002486 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487}
2488
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002489static void rdev_delayed_delete(struct work_struct *ws)
NeilBrown5792a282007-04-04 19:08:18 -07002490{
NeilBrown3cb03002011-10-11 16:45:26 +11002491 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
NeilBrown5792a282007-04-04 19:08:18 -07002492 kobject_del(&rdev->kobj);
NeilBrown177a99b2008-02-06 01:39:56 -08002493 kobject_put(&rdev->kobj);
NeilBrown5792a282007-04-04 19:08:18 -07002494}
2495
NeilBrownf72ffdd2014-09-30 14:23:59 +10002496static void unbind_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497{
2498 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002499
Tejun Heo49731ba2011-01-14 18:43:57 +01002500 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
NeilBrown4b809912008-07-21 17:05:25 +10002501 list_del_rcu(&rdev->same_set);
NeilBrown9d487392016-11-02 14:16:49 +11002502 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01002503 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 rdev->mddev = NULL;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002505 sysfs_remove_link(&rdev->kobj, "block");
NeilBrown3c0ee632008-10-21 13:25:28 +11002506 sysfs_put(rdev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07002507 sysfs_put(rdev->sysfs_unack_badblocks);
2508 sysfs_put(rdev->sysfs_badblocks);
NeilBrown3c0ee632008-10-21 13:25:28 +11002509 rdev->sysfs_state = NULL;
Junxiao Bie1a86db2020-07-14 16:10:26 -07002510 rdev->sysfs_unack_badblocks = NULL;
2511 rdev->sysfs_badblocks = NULL;
NeilBrown2230dfe2011-07-28 11:31:46 +10002512 rdev->badblocks.count = 0;
NeilBrown5792a282007-04-04 19:08:18 -07002513 /* We need to delay this, otherwise we can deadlock when
NeilBrown4b809912008-07-21 17:05:25 +10002514	 * writing 'remove' to "dev/state". We also need
2515 * to delay it due to rcu usage.
NeilBrown5792a282007-04-04 19:08:18 -07002516 */
NeilBrown4b809912008-07-21 17:05:25 +10002517 synchronize_rcu();
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002518 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
NeilBrown177a99b2008-02-06 01:39:56 -08002519 kobject_get(&rdev->kobj);
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002520 queue_work(md_rdev_misc_wq, &rdev->del_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521}
2522
2523/*
2524 * prevent the device from being mounted, repartitioned or
2525 * otherwise reused by a RAID array (or any other kernel
2526 * subsystem), by bd_claiming the device.
2527 */
NeilBrown3cb03002011-10-11 16:45:26 +11002528static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529{
2530 int err = 0;
2531 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532
Tejun Heod4d77622010-11-13 11:55:18 +01002533 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
NeilBrown3cb03002011-10-11 16:45:26 +11002534 shared ? (struct md_rdev *)lock_rdev : rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 if (IS_ERR(bdev)) {
Christoph Hellwigea3edd42020-03-24 08:25:11 +01002536 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2537 MAJOR(dev), MINOR(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 return PTR_ERR(bdev);
2539 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 rdev->bdev = bdev;
2541 return err;
2542}
2543
NeilBrown3cb03002011-10-11 16:45:26 +11002544static void unlock_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545{
2546 struct block_device *bdev = rdev->bdev;
2547 rdev->bdev = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01002548 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549}
2550
2551void md_autodetect_dev(dev_t dev);
2552
NeilBrownf72ffdd2014-09-30 14:23:59 +10002553static void export_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554{
2555 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002556
NeilBrown9d487392016-11-02 14:16:49 +11002557 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
NeilBrown545c8792012-05-22 13:54:30 +10002558 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559#ifndef MODULE
NeilBrownd0fae182008-03-04 14:29:31 -08002560 if (test_bit(AutoDetected, &rdev->flags))
2561 md_autodetect_dev(rdev->bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562#endif
2563 unlock_rdev(rdev);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002564 kobject_put(&rdev->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565}
2566
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002567void md_kick_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568{
2569 unbind_rdev_from_array(rdev);
2570 export_rdev(rdev);
2571}
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002572EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573
NeilBrownfd01b882011-10-11 16:47:53 +11002574static void export_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575{
NeilBrown0638bb02014-09-25 17:43:47 +10002576 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577
NeilBrown0638bb02014-09-25 17:43:47 +10002578 while (!list_empty(&mddev->disks)) {
2579 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2580 same_set);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002581 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 mddev->raid_disks = 0;
2584 mddev->major_version = 0;
2585}
2586
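/*
 * Try to mark the array clean. Called with mddev->lock held; the lock is
 * dropped and re-taken while writes_pending is switched to atomic mode so
 * that a reliable zero check can be made. Returns the resulting value of
 * mddev->in_sync.
 */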
NeilBrown6497709b2017-03-15 14:05:14 +11002587static bool set_in_sync(struct mddev *mddev)
2588{
Shaohua Liefa4b772017-10-18 22:08:13 -07002589 lockdep_assert_held(&mddev->lock);
NeilBrown4ad23a972017-03-15 14:05:14 +11002590 if (!mddev->in_sync) {
2591 mddev->sync_checkers++;
2592 spin_unlock(&mddev->lock);
2593 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2594 spin_lock(&mddev->lock);
2595 if (!mddev->in_sync &&
2596 percpu_ref_is_zero(&mddev->writes_pending)) {
NeilBrown6497709b2017-03-15 14:05:14 +11002597 mddev->in_sync = 1;
NeilBrown4ad23a972017-03-15 14:05:14 +11002598 /*
2599 * Ensure ->in_sync is visible before we clear
2600 * ->sync_checkers.
2601 */
NeilBrown55cc39f2017-03-15 14:05:14 +11002602 smp_mb();
NeilBrown6497709b2017-03-15 14:05:14 +11002603 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2604 sysfs_notify_dirent_safe(mddev->sysfs_state);
2605 }
NeilBrown4ad23a972017-03-15 14:05:14 +11002606 if (--mddev->sync_checkers == 0)
2607 percpu_ref_switch_to_percpu(&mddev->writes_pending);
NeilBrown6497709b2017-03-15 14:05:14 +11002608 }
2609 if (mddev->safemode == 1)
2610 mddev->safemode = 0;
2611 return mddev->in_sync;
2612}
2613
NeilBrownf72ffdd2014-09-30 14:23:59 +10002614static void sync_sbs(struct mddev *mddev, int nospares)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615{
NeilBrown42543762006-06-26 00:27:57 -07002616 /* Update each superblock (in-memory image), but
2617 * if we are allowed to, skip spares which already
2618 * have the right event counter, or have one earlier
2619 * (which would mean they aren't being marked as dirty
2620 * with the rest of the array)
2621 */
NeilBrown3cb03002011-10-11 16:45:26 +11002622 struct md_rdev *rdev;
NeilBrowndafb20f2012-03-19 12:46:39 +11002623 rdev_for_each(rdev, mddev) {
NeilBrown42543762006-06-26 00:27:57 -07002624 if (rdev->sb_events == mddev->events ||
2625 (nospares &&
2626 rdev->raid_disk < 0 &&
NeilBrown42543762006-06-26 00:27:57 -07002627 rdev->sb_events+1 == mddev->events)) {
2628 /* Don't update this superblock */
2629 rdev->sb_loaded = 2;
2630 } else {
Jonathan Brassow076f9682011-06-07 17:51:30 -05002631 sync_super(mddev, rdev);
NeilBrown42543762006-06-26 00:27:57 -07002632 rdev->sb_loaded = 1;
2633 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 }
2635}
2636
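/*
 * Check whether the superblock actually needs to be rewritten by comparing
 * the on-disk device roles and key geometry fields (size, reshape position,
 * layout, raid_disks, chunk size) of a known-good rdev against the current
 * in-memory state.
 */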
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002637static bool does_sb_need_changing(struct mddev *mddev)
2638{
2639 struct md_rdev *rdev;
2640 struct mdp_superblock_1 *sb;
2641 int role;
2642
2643 /* Find a good rdev */
2644 rdev_for_each(rdev, mddev)
2645 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2646 break;
2647
2648 /* No good device found. */
2649 if (!rdev)
2650 return false;
2651
2652 sb = page_address(rdev->sb_page);
2653 /* Check if a device has become faulty or a spare become active */
2654 rdev_for_each(rdev, mddev) {
2655 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2656 /* Device activated? */
2657 if (role == 0xffff && rdev->raid_disk >=0 &&
2658 !test_bit(Faulty, &rdev->flags))
2659 return true;
2660 /* Device turned faulty? */
2661 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2662 return true;
2663 }
2664
2665 /* Check if any mddev parameters have changed */
2666 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2667 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
Jason Yan13459212017-03-10 11:49:12 +08002668 (mddev->layout != le32_to_cpu(sb->layout)) ||
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002669 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2670 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2671 return true;
2672
2673 return false;
2674}
2675
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002676void md_update_sb(struct mddev *mddev, int force_change)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677{
NeilBrown3cb03002011-10-11 16:45:26 +11002678 struct md_rdev *rdev;
NeilBrown06d91a52005-06-21 17:17:12 -07002679 int sync_req;
NeilBrown42543762006-06-26 00:27:57 -07002680 int nospares = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002681 int any_badblocks_changed = 0;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002682 int ret = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
NeilBrownd87f0642013-04-24 11:42:40 +10002684 if (mddev->ro) {
2685 if (force_change)
Shaohua Li29530792016-12-08 15:48:19 -08002686 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownd87f0642013-04-24 11:42:40 +10002687 return;
2688 }
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002689
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002690repeat:
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002691 if (mddev_is_clustered(mddev)) {
Shaohua Li29530792016-12-08 15:48:19 -08002692 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002693 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002694 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04002695 nospares = 1;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002696 ret = md_cluster_ops->metadata_update_start(mddev);
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002697		/* Has someone else updated the sb? */
2698 if (!does_sb_need_changing(mddev)) {
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002699 if (ret == 0)
2700 md_cluster_ops->metadata_update_cancel(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08002701 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2702 BIT(MD_SB_CHANGE_DEVS) |
2703 BIT(MD_SB_CHANGE_CLEAN));
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002704 return;
2705 }
2706 }
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002707
NeilBrowndb0505d2017-10-17 16:18:36 +11002708 /*
2709 * First make sure individual recovery_offsets are correct
2710 * curr_resync_completed can only be used during recovery.
2711 * During reshape/resync it might use array-addresses rather
2712 * that device addresses.
2713 */
NeilBrowndafb20f2012-03-19 12:46:39 +11002714 rdev_for_each(rdev, mddev) {
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002715 if (rdev->raid_disk >= 0 &&
2716 mddev->delta_disks >= 0 &&
NeilBrowndb0505d2017-10-17 16:18:36 +11002717 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2718 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2719 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002720 !test_bit(Journal, &rdev->flags) &&
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002721 !test_bit(In_sync, &rdev->flags) &&
2722 mddev->curr_resync_completed > rdev->recovery_offset)
2723 rdev->recovery_offset = mddev->curr_resync_completed;
2724
NeilBrownf72ffdd2014-09-30 14:23:59 +10002725 }
Dan Williamsbd52b742010-08-30 17:33:33 +10002726 if (!mddev->persistent) {
Shaohua Li29530792016-12-08 15:48:19 -08002727 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2728 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownde393cd2011-07-28 11:31:48 +10002729 if (!mddev->external) {
Shaohua Li29530792016-12-08 15:48:19 -08002730 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrowndafb20f2012-03-19 12:46:39 +11002731 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002732 if (rdev->badblocks.changed) {
NeilBrownd0962932012-03-19 12:46:41 +11002733 rdev->badblocks.changed = 0;
Vishal Vermafc974ee2015-12-24 19:20:34 -07002734 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002735 md_error(mddev, rdev);
2736 }
2737 clear_bit(Blocked, &rdev->flags);
2738 clear_bit(BlockedBadBlocks, &rdev->flags);
2739 wake_up(&rdev->blocked_wait);
2740 }
2741 }
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002742 wake_up(&mddev->sb_wait);
2743 return;
2744 }
2745
NeilBrown85572d72014-12-15 12:56:56 +11002746 spin_lock(&mddev->lock);
NeilBrown84692192006-08-27 01:23:49 -07002747
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11002748 mddev->utime = ktime_get_real_seconds();
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002749
Shaohua Li29530792016-12-08 15:48:19 -08002750 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002751 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002752 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002753		/* just a clean <-> dirty transition; possibly leave spares alone,
2754		 * though if the event count isn't the right even/odd value we will
2755		 * have to update the spares after all
2756 */
2757 nospares = 1;
2758 if (force_change)
2759 nospares = 0;
2760 if (mddev->degraded)
NeilBrown84692192006-08-27 01:23:49 -07002761 /* If the array is degraded, then skipping spares is both
2762 * dangerous and fairly pointless.
2763 * Dangerous because a device that was removed from the array
2764		 * might have an event_count that still looks up-to-date,
2765 * so it can be re-added without a resync.
2766 * Pointless because if there are any spares to skip,
2767 * then a recovery will happen and soon that array won't
2768 * be degraded any more and the spare can go back to sleep then.
2769 */
NeilBrown850b2b422006-10-03 01:15:46 -07002770 nospares = 0;
NeilBrown84692192006-08-27 01:23:49 -07002771
NeilBrown06d91a52005-06-21 17:17:12 -07002772 sync_req = mddev->in_sync;
NeilBrown42543762006-06-26 00:27:57 -07002773
2774 /* If this is just a dirty<->clean transition, and the array is clean
2775 * and 'events' is odd, we can roll back to the previous clean state */
NeilBrown850b2b422006-10-03 01:15:46 -07002776 if (nospares
NeilBrown42543762006-06-26 00:27:57 -07002777 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
NeilBrowna8707c02010-05-18 09:28:43 +10002778 && mddev->can_decrease_events
2779 && mddev->events != 1) {
NeilBrown42543762006-06-26 00:27:57 -07002780 mddev->events--;
NeilBrowna8707c02010-05-18 09:28:43 +10002781 mddev->can_decrease_events = 0;
2782 } else {
NeilBrown42543762006-06-26 00:27:57 -07002783 /* otherwise we have to go forward and ... */
2784 mddev->events ++;
NeilBrowna8707c02010-05-18 09:28:43 +10002785 mddev->can_decrease_events = nospares;
NeilBrown42543762006-06-26 00:27:57 -07002786 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787
NeilBrown403df472014-09-30 15:52:29 +10002788 /*
2789 * This 64-bit counter should never wrap.
2790 * Either we are in around ~1 trillion A.C., assuming
2791 * 1 reboot per second, or we have a bug...
2792 */
2793 WARN_ON(mddev->events == 0);
NeilBrown2699b672011-07-28 11:31:47 +10002794
NeilBrowndafb20f2012-03-19 12:46:39 +11002795 rdev_for_each(rdev, mddev) {
NeilBrown2699b672011-07-28 11:31:47 +10002796 if (rdev->badblocks.changed)
2797 any_badblocks_changed++;
NeilBrownde393cd2011-07-28 11:31:48 +10002798 if (test_bit(Faulty, &rdev->flags))
2799 set_bit(FaultRecorded, &rdev->flags);
2800 }
NeilBrown2699b672011-07-28 11:31:47 +10002801
NeilBrowne6910632008-02-06 01:39:51 -08002802 sync_sbs(mddev, nospares);
NeilBrown85572d72014-12-15 12:56:56 +11002803 spin_unlock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
NeilBrown36a4e1f2011-10-07 14:23:17 +11002805 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2806 mdname(mddev), mddev->in_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807
Shaohua Li504634f2016-11-18 09:44:08 -08002808 if (mddev->queue)
2809 blk_add_trace_msg(mddev->queue, "md md_update_sb");
NeilBrown46533ff2016-11-18 16:16:11 +11002810rewrite:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002811 md_bitmap_update_sb(mddev->bitmap);
NeilBrowndafb20f2012-03-19 12:46:39 +11002812 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 char b[BDEVNAME_SIZE];
NeilBrown36a4e1f2011-10-07 14:23:17 +11002814
NeilBrown42543762006-06-26 00:27:57 -07002815 if (rdev->sb_loaded != 1)
2816 continue; /* no noise on spare devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817
NeilBrownf4667222013-12-09 12:04:56 +11002818 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown7bfa19f2005-06-21 17:17:28 -07002819 md_super_write(mddev,rdev,
Andre Noll0f420352008-07-11 22:02:23 +10002820 rdev->sb_start, rdev->sb_size,
NeilBrown7bfa19f2005-06-21 17:17:28 -07002821 rdev->sb_page);
NeilBrown36a4e1f2011-10-07 14:23:17 +11002822 pr_debug("md: (write) %s's sb offset: %llu\n",
2823 bdevname(rdev->bdev, b),
2824 (unsigned long long)rdev->sb_start);
NeilBrown42543762006-06-26 00:27:57 -07002825 rdev->sb_events = mddev->events;
NeilBrown2699b672011-07-28 11:31:47 +10002826 if (rdev->badblocks.size) {
2827 md_super_write(mddev, rdev,
2828 rdev->badblocks.sector,
2829 rdev->badblocks.size << 9,
2830 rdev->bb_page);
2831 rdev->badblocks.size = 0;
2832 }
NeilBrown7bfa19f2005-06-21 17:17:28 -07002833
NeilBrownf4667222013-12-09 12:04:56 +11002834 } else
NeilBrown36a4e1f2011-10-07 14:23:17 +11002835 pr_debug("md: %s (skipping faulty)\n",
2836 bdevname(rdev->bdev, b));
Andrei Warkentind70ed2e2011-10-18 12:16:48 +11002837
NeilBrown7bfa19f2005-06-21 17:17:28 -07002838 if (mddev->level == LEVEL_MULTIPATH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 /* only need to write one superblock... */
2840 break;
2841 }
NeilBrown46533ff2016-11-18 16:16:11 +11002842 if (md_super_wait(mddev) < 0)
2843 goto rewrite;
Shaohua Li29530792016-12-08 15:48:19 -08002844 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
NeilBrown7bfa19f2005-06-21 17:17:28 -07002845
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002846 if (mddev_is_clustered(mddev) && ret == 0)
2847 md_cluster_ops->metadata_update_finish(mddev);
2848
NeilBrown850b2b422006-10-03 01:15:46 -07002849 if (mddev->in_sync != sync_req ||
Shaohua Li29530792016-12-08 15:48:19 -08002850 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2851 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
NeilBrown06d91a52005-06-21 17:17:12 -07002852 /* have to write it out again */
NeilBrown06d91a52005-06-21 17:17:12 -07002853 goto repeat;
NeilBrown3d310eb2005-06-21 17:17:26 -07002854 wake_up(&mddev->sb_wait);
NeilBrownacb180b2009-04-14 16:28:34 +10002855 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
Junxiao Bie1a86db2020-07-14 16:10:26 -07002856 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown06d91a52005-06-21 17:17:12 -07002857
NeilBrowndafb20f2012-03-19 12:46:39 +11002858 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002859 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2860 clear_bit(Blocked, &rdev->flags);
2861
2862 if (any_badblocks_changed)
Vishal Vermafc974ee2015-12-24 19:20:34 -07002863 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002864 clear_bit(BlockedBadBlocks, &rdev->flags);
2865 wake_up(&rdev->blocked_wait);
2866 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867}
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002868EXPORT_SYMBOL(md_update_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002870static int add_bound_rdev(struct md_rdev *rdev)
2871{
2872 struct mddev *mddev = rdev->mddev;
2873 int err = 0;
Shaohua Li87d4d912016-01-06 14:37:14 -08002874 bool add_journal = test_bit(Journal, &rdev->flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002875
Shaohua Li87d4d912016-01-06 14:37:14 -08002876 if (!mddev->pers->hot_remove_disk || add_journal) {
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002877 /* If there is hot_add_disk but no hot_remove_disk
2878		 * then added disks are for geometry changes
2879		 * and should be added immediately.
2880 */
2881 super_types[mddev->major_version].
2882 validate_super(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002883 if (add_journal)
2884 mddev_suspend(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002885 err = mddev->pers->hot_add_disk(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002886 if (add_journal)
2887 mddev_resume(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002888 if (err) {
Guoqing Jiangdb767672016-06-02 23:32:05 -04002889 md_kick_rdev_from_array(rdev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002890 return err;
2891 }
2892 }
2893 sysfs_notify_dirent_safe(rdev->sysfs_state);
2894
Shaohua Li29530792016-12-08 15:48:19 -08002895 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002896 if (mddev->degraded)
2897 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2898 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2899 md_new_event(mddev);
2900 md_wakeup_thread(mddev->thread);
2901 return 0;
2902}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903
Andre Noll7f6ce762008-03-23 18:34:54 +01002904/* words written to sysfs files may, or may not, be \n terminated.
NeilBrownbce74da2006-01-06 00:20:41 -08002905 * We want to accept either case. For this we use cmd_match.
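 * For example, both "faulty\n" and "faulty" match "faulty", while
 * "faulty2" does not.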
2906 */
2907static int cmd_match(const char *cmd, const char *str)
2908{
2909 /* See if cmd, written into a sysfs file, matches
2910 * str. They must either be the same, or cmd can
2911 * have a trailing newline
2912 */
2913 while (*cmd && *str && *cmd == *str) {
2914 cmd++;
2915 str++;
2916 }
2917 if (*cmd == '\n')
2918 cmd++;
2919 if (*str || *cmd)
2920 return 0;
2921 return 1;
2922}
2923
NeilBrown86e6ffd2005-11-08 21:39:24 -08002924struct rdev_sysfs_entry {
2925 struct attribute attr;
NeilBrown3cb03002011-10-11 16:45:26 +11002926 ssize_t (*show)(struct md_rdev *, char *);
2927 ssize_t (*store)(struct md_rdev *, const char *, size_t);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002928};
2929
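/*
 * Show the rdev state flags as a comma-separated list, e.g. "in_sync\n" for
 * a healthy active member or "write_mostly,spare\n" for a write-mostly spare.
 */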
2930static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002931state_show(struct md_rdev *rdev, char *page)
NeilBrown86e6ffd2005-11-08 21:39:24 -08002932{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002933 char *sep = ",";
NeilBrown20a49ff2008-02-06 01:39:57 -08002934 size_t len = 0;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002935 unsigned long flags = READ_ONCE(rdev->flags);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002936
NeilBrown758bfc82014-12-15 12:56:59 +11002937 if (test_bit(Faulty, &flags) ||
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02002938 (!test_bit(ExternalBbl, &flags) &&
2939 rdev->badblocks.unacked_exist))
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002940 len += sprintf(page+len, "faulty%s", sep);
2941 if (test_bit(In_sync, &flags))
2942 len += sprintf(page+len, "in_sync%s", sep);
2943 if (test_bit(Journal, &flags))
2944 len += sprintf(page+len, "journal%s", sep);
2945 if (test_bit(WriteMostly, &flags))
2946 len += sprintf(page+len, "write_mostly%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002947 if (test_bit(Blocked, &flags) ||
NeilBrown52c64152011-12-08 16:22:48 +11002948 (rdev->badblocks.unacked_exist
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002949 && !test_bit(Faulty, &flags)))
2950 len += sprintf(page+len, "blocked%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002951 if (!test_bit(Faulty, &flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002952 !test_bit(Journal, &flags) &&
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002953 !test_bit(In_sync, &flags))
2954 len += sprintf(page+len, "spare%s", sep);
2955 if (test_bit(WriteErrorSeen, &flags))
2956 len += sprintf(page+len, "write_error%s", sep);
2957 if (test_bit(WantReplacement, &flags))
2958 len += sprintf(page+len, "want_replacement%s", sep);
2959 if (test_bit(Replacement, &flags))
2960 len += sprintf(page+len, "replacement%s", sep);
2961 if (test_bit(ExternalBbl, &flags))
2962 len += sprintf(page+len, "external_bbl%s", sep);
NeilBrown688834e2016-11-18 16:16:11 +11002963 if (test_bit(FailFast, &flags))
2964 len += sprintf(page+len, "failfast%s", sep);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002965
2966 if (len)
2967 len -= strlen(sep);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002968
NeilBrown86e6ffd2005-11-08 21:39:24 -08002969 return len+sprintf(page+len, "\n");
2970}
2971
NeilBrown45dc2de2006-06-26 00:27:58 -07002972static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002973state_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown45dc2de2006-06-26 00:27:58 -07002974{
2975 /* can write
NeilBrownde393cd2011-07-28 11:31:48 +10002976 * faulty - simulates an error
NeilBrown45dc2de2006-06-26 00:27:58 -07002977 * remove - disconnects the device
NeilBrownf6556752006-06-26 00:28:01 -07002978 * writemostly - sets write_mostly
2979 * -writemostly - clears write_mostly
NeilBrownde393cd2011-07-28 11:31:48 +10002980	 *  blocked - sets the Blocked flag
2981	 *  -blocked - clears the Blocked flag and possibly simulates an error
NeilBrown6d56e272009-04-14 12:01:57 +10002982	 *  insync - sets In_sync provided the device isn't active
NeilBrownf4667222013-12-09 12:04:56 +11002983	 *  -insync - clears In_sync for a device with a slot assigned,
2984 * so that it gets rebuilt based on bitmap
NeilBrownd7a9d442011-07-28 11:31:48 +10002985 * write_error - sets WriteErrorSeen
2986 * -write_error - clears WriteErrorSeen
NeilBrown688834e2016-11-18 16:16:11 +11002987 * {,-}failfast - set/clear FailFast
NeilBrown45dc2de2006-06-26 00:27:58 -07002988 */
2989 int err = -EINVAL;
2990 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2991 md_error(rdev->mddev, rdev);
NeilBrown5ef56c82011-08-25 14:42:51 +10002992 if (test_bit(Faulty, &rdev->flags))
2993 err = 0;
2994 else
2995 err = -EBUSY;
NeilBrown45dc2de2006-06-26 00:27:58 -07002996 } else if (cmd_match(buf, "remove")) {
Shaohua Li5d881782016-07-28 09:06:34 -07002997 if (rdev->mddev->pers) {
2998 clear_bit(Blocked, &rdev->flags);
2999 remove_and_add_spares(rdev->mddev, rdev);
3000 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003001 if (rdev->raid_disk >= 0)
3002 err = -EBUSY;
3003 else {
NeilBrownfd01b882011-10-11 16:47:53 +11003004 struct mddev *mddev = rdev->mddev;
NeilBrown45dc2de2006-06-26 00:27:58 -07003005 err = 0;
Guoqing Jianga9720902015-10-12 17:21:27 +08003006 if (mddev_is_clustered(mddev))
3007 err = md_cluster_ops->remove_disk(mddev, rdev);
3008
3009 if (err == 0) {
3010 md_kick_rdev_from_array(rdev);
NeilBrown060b0682016-11-04 16:46:03 +11003011 if (mddev->pers) {
Shaohua Li29530792016-12-08 15:48:19 -08003012 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11003013 md_wakeup_thread(mddev->thread);
3014 }
Guoqing Jianga9720902015-10-12 17:21:27 +08003015 md_new_event(mddev);
3016 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003017 }
NeilBrownf6556752006-06-26 00:28:01 -07003018 } else if (cmd_match(buf, "writemostly")) {
3019 set_bit(WriteMostly, &rdev->flags);
Guoqing Jiang404659c2019-12-23 10:48:53 +01003020 mddev_create_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003021 err = 0;
3022 } else if (cmd_match(buf, "-writemostly")) {
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01003023 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003024 clear_bit(WriteMostly, &rdev->flags);
3025 err = 0;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003026 } else if (cmd_match(buf, "blocked")) {
3027 set_bit(Blocked, &rdev->flags);
3028 err = 0;
3029 } else if (cmd_match(buf, "-blocked")) {
NeilBrownde393cd2011-07-28 11:31:48 +10003030 if (!test_bit(Faulty, &rdev->flags) &&
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02003031 !test_bit(ExternalBbl, &rdev->flags) &&
NeilBrown7da64a02011-08-30 16:20:17 +10003032 rdev->badblocks.unacked_exist) {
NeilBrownde393cd2011-07-28 11:31:48 +10003033 /* metadata handler doesn't understand badblocks,
3034 * so we need to fail the device
3035 */
3036 md_error(rdev->mddev, rdev);
3037 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003038 clear_bit(Blocked, &rdev->flags);
NeilBrownde393cd2011-07-28 11:31:48 +10003039 clear_bit(BlockedBadBlocks, &rdev->flags);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003040 wake_up(&rdev->blocked_wait);
3041 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3042 md_wakeup_thread(rdev->mddev->thread);
3043
3044 err = 0;
NeilBrown6d56e272009-04-14 12:01:57 +10003045 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3046 set_bit(In_sync, &rdev->flags);
3047 err = 0;
NeilBrown688834e2016-11-18 16:16:11 +11003048 } else if (cmd_match(buf, "failfast")) {
3049 set_bit(FailFast, &rdev->flags);
3050 err = 0;
3051 } else if (cmd_match(buf, "-failfast")) {
3052 clear_bit(FailFast, &rdev->flags);
3053 err = 0;
Shaohua Lif2076e72015-10-08 21:54:12 -07003054 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3055 !test_bit(Journal, &rdev->flags)) {
NeilBrowne1960f82014-09-30 15:24:25 +10003056 if (rdev->mddev->pers == NULL) {
3057 clear_bit(In_sync, &rdev->flags);
3058 rdev->saved_raid_disk = rdev->raid_disk;
3059 rdev->raid_disk = -1;
3060 err = 0;
3061 }
NeilBrownd7a9d442011-07-28 11:31:48 +10003062 } else if (cmd_match(buf, "write_error")) {
3063 set_bit(WriteErrorSeen, &rdev->flags);
3064 err = 0;
3065 } else if (cmd_match(buf, "-write_error")) {
3066 clear_bit(WriteErrorSeen, &rdev->flags);
3067 err = 0;
NeilBrown2d78f8c2011-12-23 10:17:51 +11003068 } else if (cmd_match(buf, "want_replacement")) {
3069 /* Any non-spare device that is not a replacement can
3070 * become want_replacement at any time, but we then need to
3071 * check if recovery is needed.
3072 */
3073 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07003074 !test_bit(Journal, &rdev->flags) &&
NeilBrown2d78f8c2011-12-23 10:17:51 +11003075 !test_bit(Replacement, &rdev->flags))
3076 set_bit(WantReplacement, &rdev->flags);
3077 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3078 md_wakeup_thread(rdev->mddev->thread);
3079 err = 0;
3080 } else if (cmd_match(buf, "-want_replacement")) {
3081 /* Clearing 'want_replacement' is always allowed.
3082 * Once replacements starts it is too late though.
3083 */
3084 err = 0;
3085 clear_bit(WantReplacement, &rdev->flags);
3086 } else if (cmd_match(buf, "replacement")) {
3087 /* Can only set a device as a replacement when array has not
3088 * yet been started. Once running, replacement is automatic
3089 * from spares, or by assigning 'slot'.
3090 */
3091 if (rdev->mddev->pers)
3092 err = -EBUSY;
3093 else {
3094 set_bit(Replacement, &rdev->flags);
3095 err = 0;
3096 }
3097 } else if (cmd_match(buf, "-replacement")) {
3098 /* Similarly, can only clear Replacement before start */
3099 if (rdev->mddev->pers)
3100 err = -EBUSY;
3101 else {
3102 clear_bit(Replacement, &rdev->flags);
3103 err = 0;
3104 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003105 } else if (cmd_match(buf, "re-add")) {
Yufen Yuee37e622019-04-02 14:22:14 +08003106 if (!rdev->mddev->pers)
3107 err = -EINVAL;
3108 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3109 rdev->saved_raid_disk >= 0) {
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05003110 /* clear_bit is performed _after_ all the devices
3111 * have their local Faulty bit cleared. If any writes
3112 * happen in the meantime in the local node, they
3113 * will land in the local bitmap, which will be synced
3114 * by this node eventually
3115 */
3116 if (!mddev_is_clustered(rdev->mddev) ||
3117 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3118 clear_bit(Faulty, &rdev->flags);
3119 err = add_bound_rdev(rdev);
3120 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003121 } else
3122 err = -EBUSY;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02003123 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3124 set_bit(ExternalBbl, &rdev->flags);
3125 rdev->badblocks.shift = 0;
3126 err = 0;
3127 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3128 clear_bit(ExternalBbl, &rdev->flags);
3129 err = 0;
NeilBrown45dc2de2006-06-26 00:27:58 -07003130 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10003131 if (!err)
3132 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrown45dc2de2006-06-26 00:27:58 -07003133 return err ? err : len;
3134}
NeilBrown80ca3a42006-07-10 04:44:18 -07003135static struct rdev_sysfs_entry rdev_state =
NeilBrown750f1992014-09-30 08:53:05 +10003136__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003137
3138static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003139errors_show(struct md_rdev *rdev, char *page)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003140{
3141 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3142}
3143
3144static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003145errors_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003146{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003147 unsigned int n;
3148 int rv;
3149
3150 rv = kstrtouint(buf, 10, &n);
3151 if (rv < 0)
3152 return rv;
3153 atomic_set(&rdev->corrected_errors, n);
3154 return len;
NeilBrown4dbcdc72006-01-06 00:20:52 -08003155}
3156static struct rdev_sysfs_entry rdev_errors =
NeilBrown80ca3a42006-07-10 04:44:18 -07003157__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
NeilBrown4dbcdc72006-01-06 00:20:52 -08003158
NeilBrown014236d2006-01-06 00:20:55 -08003159static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003160slot_show(struct md_rdev *rdev, char *page)
NeilBrown014236d2006-01-06 00:20:55 -08003161{
Shaohua Lif2076e72015-10-08 21:54:12 -07003162 if (test_bit(Journal, &rdev->flags))
3163 return sprintf(page, "journal\n");
3164 else if (rdev->raid_disk < 0)
NeilBrown014236d2006-01-06 00:20:55 -08003165 return sprintf(page, "none\n");
3166 else
3167 return sprintf(page, "%d\n", rdev->raid_disk);
3168}
3169
3170static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003171slot_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown014236d2006-01-06 00:20:55 -08003172{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003173 int slot;
NeilBrownc303da62008-02-06 01:39:51 -08003174 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003175
Shaohua Lif2076e72015-10-08 21:54:12 -07003176 if (test_bit(Journal, &rdev->flags))
3177 return -EBUSY;
NeilBrown014236d2006-01-06 00:20:55 -08003178 if (strncmp(buf, "none", 4)==0)
3179 slot = -1;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003180 else {
3181 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3182 if (err < 0)
3183 return err;
3184 }
Neil Brown6c2fce22008-06-28 08:31:31 +10003185 if (rdev->mddev->pers && slot == -1) {
NeilBrownc303da62008-02-06 01:39:51 -08003186 /* Setting 'slot' on an active array requires also
3187 * updating the 'rd%d' link, and communicating
3188 * with the personality with ->hot_*_disk.
3189 * For now we only support removing
3190 * failed/spare devices. This normally happens automatically,
3191 * but not when the metadata is externally managed.
3192 */
NeilBrownc303da62008-02-06 01:39:51 -08003193 if (rdev->raid_disk == -1)
3194 return -EEXIST;
3195 /* personality does all needed checks */
Namhyung Kim01393f32011-06-09 11:42:54 +10003196 if (rdev->mddev->pers->hot_remove_disk == NULL)
NeilBrownc303da62008-02-06 01:39:51 -08003197 return -EINVAL;
NeilBrown746d3202013-04-24 11:42:41 +10003198 clear_bit(Blocked, &rdev->flags);
3199 remove_and_add_spares(rdev->mddev, rdev);
3200 if (rdev->raid_disk >= 0)
3201 return -EBUSY;
NeilBrownc303da62008-02-06 01:39:51 -08003202 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3203 md_wakeup_thread(rdev->mddev->thread);
Neil Brown6c2fce22008-06-28 08:31:31 +10003204 } else if (rdev->mddev->pers) {
Neil Brown6c2fce22008-06-28 08:31:31 +10003205 /* Activating a spare .. or possibly reactivating
NeilBrown6d56e272009-04-14 12:01:57 +10003206 * if we ever get bitmaps working here.
Neil Brown6c2fce22008-06-28 08:31:31 +10003207 */
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003208 int err;
Neil Brown6c2fce22008-06-28 08:31:31 +10003209
3210 if (rdev->raid_disk != -1)
3211 return -EBUSY;
3212
NeilBrownc6751b22011-02-02 11:57:13 +11003213 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3214 return -EBUSY;
3215
Neil Brown6c2fce22008-06-28 08:31:31 +10003216 if (rdev->mddev->pers->hot_add_disk == NULL)
3217 return -EINVAL;
3218
NeilBrownba1b41b2011-01-14 09:14:34 +11003219 if (slot >= rdev->mddev->raid_disks &&
3220 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3221 return -ENOSPC;
3222
Neil Brown6c2fce22008-06-28 08:31:31 +10003223 rdev->raid_disk = slot;
3224 if (test_bit(In_sync, &rdev->flags))
3225 rdev->saved_raid_disk = slot;
3226 else
3227 rdev->saved_raid_disk = -1;
NeilBrownd30519f2011-10-18 12:13:47 +11003228 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11003229 clear_bit(Bitmap_sync, &rdev->flags);
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02003230 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003231 if (err) {
3232 rdev->raid_disk = -1;
3233 return err;
3234 } else
3235 sysfs_notify_dirent_safe(rdev->sysfs_state);
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09003236 /* failure here is OK */;
3237 sysfs_link_rdev(rdev->mddev, rdev);
Neil Brown6c2fce22008-06-28 08:31:31 +10003238 /* don't wakeup anyone, leave that to userspace. */
NeilBrownc303da62008-02-06 01:39:51 -08003239 } else {
NeilBrownba1b41b2011-01-14 09:14:34 +11003240 if (slot >= rdev->mddev->raid_disks &&
3241 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
NeilBrownc303da62008-02-06 01:39:51 -08003242 return -ENOSPC;
3243 rdev->raid_disk = slot;
3244 /* assume it is working */
NeilBrownc5d79ad2008-02-06 01:39:54 -08003245 clear_bit(Faulty, &rdev->flags);
3246 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc303da62008-02-06 01:39:51 -08003247 set_bit(In_sync, &rdev->flags);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003248 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrownc303da62008-02-06 01:39:51 -08003249 }
NeilBrown014236d2006-01-06 00:20:55 -08003250 return len;
3251}
3252
NeilBrown014236d2006-01-06 00:20:55 -08003253static struct rdev_sysfs_entry rdev_slot =
NeilBrown80ca3a42006-07-10 04:44:18 -07003254__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
NeilBrown014236d2006-01-06 00:20:55 -08003255
NeilBrown93c8cad2006-01-06 00:20:56 -08003256static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003257offset_show(struct md_rdev *rdev, char *page)
NeilBrown93c8cad2006-01-06 00:20:56 -08003258{
Andrew Morton6961ece2006-01-06 00:20:59 -08003259 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
NeilBrown93c8cad2006-01-06 00:20:56 -08003260}
3261
3262static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003263offset_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown93c8cad2006-01-06 00:20:56 -08003264{
NeilBrownc6563a82012-05-21 09:27:00 +10003265 unsigned long long offset;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003266 if (kstrtoull(buf, 10, &offset) < 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003267 return -EINVAL;
Neil Brown8ed0a522008-06-28 08:31:29 +10003268 if (rdev->mddev->pers && rdev->raid_disk >= 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003269 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003270 if (rdev->sectors && rdev->mddev->external)
NeilBrownc5d79ad2008-02-06 01:39:54 -08003271 /* Must set offset before size, so overlap checks
3272 * can be sane */
3273 return -EBUSY;
NeilBrown93c8cad2006-01-06 00:20:56 -08003274 rdev->data_offset = offset;
NeilBrown25f7fd42012-07-19 15:59:18 +10003275 rdev->new_data_offset = offset;
NeilBrown93c8cad2006-01-06 00:20:56 -08003276 return len;
3277}
3278
3279static struct rdev_sysfs_entry rdev_offset =
NeilBrown80ca3a42006-07-10 04:44:18 -07003280__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
NeilBrown93c8cad2006-01-06 00:20:56 -08003281
NeilBrownc6563a82012-05-21 09:27:00 +10003282static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3283{
3284 return sprintf(page, "%llu\n",
3285 (unsigned long long)rdev->new_data_offset);
3286}
3287
3288static ssize_t new_offset_store(struct md_rdev *rdev,
3289 const char *buf, size_t len)
3290{
3291 unsigned long long new_offset;
3292 struct mddev *mddev = rdev->mddev;
3293
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003294 if (kstrtoull(buf, 10, &new_offset) < 0)
NeilBrownc6563a82012-05-21 09:27:00 +10003295 return -EINVAL;
3296
NeilBrownf851b602014-12-11 10:02:10 +11003297 if (mddev->sync_thread ||
3298 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
NeilBrownc6563a82012-05-21 09:27:00 +10003299 return -EBUSY;
3300 if (new_offset == rdev->data_offset)
3301 /* reset is always permitted */
3302 ;
3303 else if (new_offset > rdev->data_offset) {
3304 /* must not push array size beyond rdev_sectors */
3305 if (new_offset - rdev->data_offset
3306 + mddev->dev_sectors > rdev->sectors)
3307 return -E2BIG;
3308 }
3309 /* Metadata worries about other space details. */
3310
3311 /* decreasing the offset is inconsistent with a backwards
3312 * reshape.
3313 */
3314 if (new_offset < rdev->data_offset &&
3315 mddev->reshape_backwards)
3316 return -EINVAL;
3317 /* Increasing offset is inconsistent with forwards
3318 * reshape. reshape_direction should be set to
3319 * 'backwards' first.
3320 */
3321 if (new_offset > rdev->data_offset &&
3322 !mddev->reshape_backwards)
3323 return -EINVAL;
3324
3325 if (mddev->pers && mddev->persistent &&
3326 !super_types[mddev->major_version]
3327 .allow_new_offset(rdev, new_offset))
3328 return -E2BIG;
3329 rdev->new_data_offset = new_offset;
3330 if (new_offset > rdev->data_offset)
3331 mddev->reshape_backwards = 1;
3332 else if (new_offset < rdev->data_offset)
3333 mddev->reshape_backwards = 0;
3334
3335 return len;
3336}
3337static struct rdev_sysfs_entry rdev_new_offset =
3338__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3339
NeilBrown83303b62006-01-06 00:21:06 -08003340static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003341rdev_size_show(struct md_rdev *rdev, char *page)
NeilBrown83303b62006-01-06 00:21:06 -08003342{
Andre Nolldd8ac332009-03-31 14:33:13 +11003343 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
NeilBrown83303b62006-01-06 00:21:06 -08003344}
3345
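/*
 * For example, overlaps(0, 100, 50, 10) returns 1 (the second range lies
 * inside the first), while overlaps(0, 50, 50, 10) returns 0 (the ranges
 * merely touch).
 */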
NeilBrownc5d79ad2008-02-06 01:39:54 -08003346static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3347{
3348 /* check if two start/length pairs overlap */
3349 if (s1+l1 <= s2)
3350 return 0;
3351 if (s2+l2 <= s1)
3352 return 0;
3353 return 1;
3354}
3355
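/*
 * Parse a size given in 1K blocks and convert it to sectors, e.g. "1024"
 * yields 2048 sectors (1 MiB). Values that would overflow sector_t are
 * rejected with -EINVAL.
 */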
Dan Williamsb522adc2009-03-31 15:00:31 +11003356static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3357{
3358 unsigned long long blocks;
3359 sector_t new;
3360
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003361 if (kstrtoull(buf, 10, &blocks) < 0)
Dan Williamsb522adc2009-03-31 15:00:31 +11003362 return -EINVAL;
3363
3364 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3365 return -EINVAL; /* sector conversion overflow */
3366
3367 new = blocks * 2;
3368 if (new != blocks * 2)
3369 return -EINVAL; /* unsigned long long to sector_t overflow */
3370
3371 *sectors = new;
3372 return 0;
3373}
3374
NeilBrown83303b62006-01-06 00:21:06 -08003375static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003376rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown83303b62006-01-06 00:21:06 -08003377{
NeilBrownfd01b882011-10-11 16:47:53 +11003378 struct mddev *my_mddev = rdev->mddev;
Andre Nolldd8ac332009-03-31 14:33:13 +11003379 sector_t oldsectors = rdev->sectors;
Dan Williamsb522adc2009-03-31 15:00:31 +11003380 sector_t sectors;
NeilBrown27c529b2008-03-04 14:29:33 -08003381
Shaohua Lif2076e72015-10-08 21:54:12 -07003382 if (test_bit(Journal, &rdev->flags))
3383 return -EBUSY;
Dan Williamsb522adc2009-03-31 15:00:31 +11003384 if (strict_blocks_to_sectors(buf, &sectors) < 0)
Neil Brownd7027452008-07-12 10:37:50 +10003385 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10003386 if (rdev->data_offset != rdev->new_data_offset)
3387 return -EINVAL; /* too confusing */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003388 if (my_mddev->pers && rdev->raid_disk >= 0) {
Neil Brownd7027452008-07-12 10:37:50 +10003389 if (my_mddev->persistent) {
Andre Nolldd8ac332009-03-31 14:33:13 +11003390 sectors = super_types[my_mddev->major_version].
3391 rdev_size_change(rdev, sectors);
3392 if (!sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10003393 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003394 } else if (!sectors)
Mike Snitzer77304d22010-11-08 14:39:12 +01003395 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
Andre Nolldd8ac332009-03-31 14:33:13 +11003396 rdev->data_offset;
NeilBrowna6468532013-02-21 14:33:17 +11003397 if (!my_mddev->pers->resize)
3398 /* Cannot change size for RAID0 or Linear etc */
3399 return -EINVAL;
Chris Webb0cd17fe2008-06-28 08:31:46 +10003400 }
Andre Nolldd8ac332009-03-31 14:33:13 +11003401 if (sectors < my_mddev->dev_sectors)
Chris Webb7d3c6f82008-10-13 11:55:11 +11003402 return -EINVAL; /* component must fit device */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003403
Andre Nolldd8ac332009-03-31 14:33:13 +11003404 rdev->sectors = sectors;
3405 if (sectors > oldsectors && my_mddev->external) {
NeilBrown8b1afc32014-09-29 15:33:20 +10003406 /* Need to check that all other rdevs with the same
3407 * ->bdev do not overlap. 'rcu' is sufficient to walk
3408 * the rdev lists safely.
3409 * This check does not provide a hard guarantee, it
3410 * just helps avoid dangerous mistakes.
NeilBrownc5d79ad2008-02-06 01:39:54 -08003411 */
NeilBrownfd01b882011-10-11 16:47:53 +11003412 struct mddev *mddev;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003413 int overlap = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11003414 struct list_head *tmp;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003415
NeilBrown8b1afc32014-09-29 15:33:20 +10003416 rcu_read_lock();
NeilBrown29ac4aa2008-02-06 01:39:58 -08003417 for_each_mddev(mddev, tmp) {
NeilBrown3cb03002011-10-11 16:45:26 +11003418 struct md_rdev *rdev2;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003419
NeilBrowndafb20f2012-03-19 12:46:39 +11003420 rdev_for_each(rdev2, mddev)
NeilBrownf21e9ff2011-01-31 12:10:09 +11003421 if (rdev->bdev == rdev2->bdev &&
3422 rdev != rdev2 &&
3423 overlaps(rdev->data_offset, rdev->sectors,
3424 rdev2->data_offset,
3425 rdev2->sectors)) {
NeilBrownc5d79ad2008-02-06 01:39:54 -08003426 overlap = 1;
3427 break;
3428 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08003429 if (overlap) {
3430 mddev_put(mddev);
3431 break;
3432 }
3433 }
NeilBrown8b1afc32014-09-29 15:33:20 +10003434 rcu_read_unlock();
NeilBrownc5d79ad2008-02-06 01:39:54 -08003435 if (overlap) {
3436 /* Someone else could have slipped in a size
3437 * change here, but doing so is just silly.
Andre Nolldd8ac332009-03-31 14:33:13 +11003438 * We put oldsectors back because we *know* it is
NeilBrownc5d79ad2008-02-06 01:39:54 -08003439 * safe, and trust userspace not to race with
3440 * itself
3441 */
Andre Nolldd8ac332009-03-31 14:33:13 +11003442 rdev->sectors = oldsectors;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003443 return -EBUSY;
3444 }
3445 }
NeilBrown83303b62006-01-06 00:21:06 -08003446 return len;
3447}
3448
3449static struct rdev_sysfs_entry rdev_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07003450__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
NeilBrown83303b62006-01-06 00:21:06 -08003451
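/* 'recovery_start' is the sector from which recovery of this device will
 * resume.  "none" (internally MaxSector) means the device is fully in-sync.
 * It can only be changed while the device is not active in a running array.
 */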
NeilBrown3cb03002011-10-11 16:45:26 +11003452static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
Dan Williams06e3c812009-12-12 21:17:12 -07003453{
3454 unsigned long long recovery_start = rdev->recovery_offset;
3455
3456 if (test_bit(In_sync, &rdev->flags) ||
3457 recovery_start == MaxSector)
3458 return sprintf(page, "none\n");
3459
3460 return sprintf(page, "%llu\n", recovery_start);
3461}
3462
NeilBrown3cb03002011-10-11 16:45:26 +11003463static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
Dan Williams06e3c812009-12-12 21:17:12 -07003464{
3465 unsigned long long recovery_start;
3466
3467 if (cmd_match(buf, "none"))
3468 recovery_start = MaxSector;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003469 else if (kstrtoull(buf, 10, &recovery_start))
Dan Williams06e3c812009-12-12 21:17:12 -07003470 return -EINVAL;
3471
3472 if (rdev->mddev->pers &&
3473 rdev->raid_disk >= 0)
3474 return -EBUSY;
3475
3476 rdev->recovery_offset = recovery_start;
3477 if (recovery_start == MaxSector)
3478 set_bit(In_sync, &rdev->flags);
3479 else
3480 clear_bit(In_sync, &rdev->flags);
3481 return len;
3482}
3483
3484static struct rdev_sysfs_entry rdev_recovery_start =
3485__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3486
Vishal Vermafc974ee2015-12-24 19:20:34 -07003487/* sysfs access to bad-blocks list.
3488 * We present two files.
 3489 * 'bad_blocks' lists sector numbers and lengths of ranges that
 3490 * are recorded as bad. The list is truncated to fit within
 3491 * the one-page limit of sysfs.
 3492 * Writing "sector length" to this file adds an acknowledged
 3493 * bad block.
 3494 * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3495 * been acknowledged. Writing to this file adds bad blocks
3496 * without acknowledging them. This is largely for testing.
3497 */
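/* Illustrative usage (a sketch; the exact sysfs path depends on the setup):
 *   echo "123456 16" > /sys/block/md0/md/dev-sda1/bad_blocks
 * records an acknowledged 16-sector bad range starting at sector 123456.
 */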
NeilBrown3cb03002011-10-11 16:45:26 +11003498static ssize_t bb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003499{
3500 return badblocks_show(&rdev->badblocks, page, 0);
3501}
NeilBrown3cb03002011-10-11 16:45:26 +11003502static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003503{
NeilBrownde393cd2011-07-28 11:31:48 +10003504 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3505 /* Maybe that ack was all we needed */
3506 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3507 wake_up(&rdev->blocked_wait);
3508 return rv;
NeilBrown16c791a2011-07-28 11:31:47 +10003509}
3510static struct rdev_sysfs_entry rdev_bad_blocks =
3511__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3512
NeilBrown3cb03002011-10-11 16:45:26 +11003513static ssize_t ubb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003514{
3515 return badblocks_show(&rdev->badblocks, page, 1);
3516}
NeilBrown3cb03002011-10-11 16:45:26 +11003517static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003518{
3519 return badblocks_store(&rdev->badblocks, page, len, 1);
3520}
3521static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3522__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3523
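/* Partial Parity Log (PPL) location and size on this device: 'ppl_sector'
 * is the absolute sector of the log, 'ppl_size' its length in sectors.
 * Neither can be changed while the device is active in an array that has
 * PPL enabled.
 */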
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003524static ssize_t
3525ppl_sector_show(struct md_rdev *rdev, char *page)
3526{
3527 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3528}
3529
3530static ssize_t
3531ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3532{
3533 unsigned long long sector;
3534
3535 if (kstrtoull(buf, 10, &sector) < 0)
3536 return -EINVAL;
3537 if (sector != (sector_t)sector)
3538 return -EINVAL;
3539
3540 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3541 rdev->raid_disk >= 0)
3542 return -EBUSY;
3543
3544 if (rdev->mddev->persistent) {
3545 if (rdev->mddev->major_version == 0)
3546 return -EINVAL;
3547 if ((sector > rdev->sb_start &&
3548 sector - rdev->sb_start > S16_MAX) ||
3549 (sector < rdev->sb_start &&
3550 rdev->sb_start - sector > -S16_MIN))
3551 return -EINVAL;
3552 rdev->ppl.offset = sector - rdev->sb_start;
3553 } else if (!rdev->mddev->external) {
3554 return -EBUSY;
3555 }
3556 rdev->ppl.sector = sector;
3557 return len;
3558}
3559
3560static struct rdev_sysfs_entry rdev_ppl_sector =
3561__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3562
3563static ssize_t
3564ppl_size_show(struct md_rdev *rdev, char *page)
3565{
3566 return sprintf(page, "%u\n", rdev->ppl.size);
3567}
3568
3569static ssize_t
3570ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3571{
3572 unsigned int size;
3573
3574 if (kstrtouint(buf, 10, &size) < 0)
3575 return -EINVAL;
3576
3577 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3578 rdev->raid_disk >= 0)
3579 return -EBUSY;
3580
3581 if (rdev->mddev->persistent) {
3582 if (rdev->mddev->major_version == 0)
3583 return -EINVAL;
3584 if (size > U16_MAX)
3585 return -EINVAL;
3586 } else if (!rdev->mddev->external) {
3587 return -EBUSY;
3588 }
3589 rdev->ppl.size = size;
3590 return len;
3591}
3592
3593static struct rdev_sysfs_entry rdev_ppl_size =
3594__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3595
NeilBrown86e6ffd2005-11-08 21:39:24 -08003596static struct attribute *rdev_default_attrs[] = {
3597 &rdev_state.attr,
NeilBrown4dbcdc72006-01-06 00:20:52 -08003598 &rdev_errors.attr,
NeilBrown014236d2006-01-06 00:20:55 -08003599 &rdev_slot.attr,
NeilBrown93c8cad2006-01-06 00:20:56 -08003600 &rdev_offset.attr,
NeilBrownc6563a82012-05-21 09:27:00 +10003601 &rdev_new_offset.attr,
NeilBrown83303b62006-01-06 00:21:06 -08003602 &rdev_size.attr,
Dan Williams06e3c812009-12-12 21:17:12 -07003603 &rdev_recovery_start.attr,
NeilBrown16c791a2011-07-28 11:31:47 +10003604 &rdev_bad_blocks.attr,
3605 &rdev_unack_bad_blocks.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003606 &rdev_ppl_sector.attr,
3607 &rdev_ppl_size.attr,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003608 NULL,
3609};
3610static ssize_t
3611rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3612{
3613 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003614 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003615
3616 if (!entry->show)
3617 return -EIO;
NeilBrown758bfc82014-12-15 12:56:59 +11003618 if (!rdev->mddev)
Marcos Paulo de Souza168b3052019-06-14 15:41:06 -07003619 return -ENODEV;
NeilBrown758bfc82014-12-15 12:56:59 +11003620 return entry->show(rdev, page);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003621}
3622
3623static ssize_t
3624rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3625 const char *page, size_t length)
3626{
3627 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003628 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown27c529b2008-03-04 14:29:33 -08003629 ssize_t rv;
NeilBrownfd01b882011-10-11 16:47:53 +11003630 struct mddev *mddev = rdev->mddev;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003631
3632 if (!entry->store)
3633 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07003634 if (!capable(CAP_SYS_ADMIN))
3635 return -EACCES;
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003636 rv = mddev ? mddev_lock(mddev) : -ENODEV;
NeilBrownca388052008-02-06 01:39:55 -08003637 if (!rv) {
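		/* rdev->mddev may have been cleared while we waited for the
		 * lock (e.g. the device was removed), so re-check it under
		 * the lock.
		 */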
NeilBrown27c529b2008-03-04 14:29:33 -08003638 if (rdev->mddev == NULL)
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003639 rv = -ENODEV;
NeilBrown27c529b2008-03-04 14:29:33 -08003640 else
3641 rv = entry->store(rdev, page, length);
Dan Williams6a518302008-04-30 00:52:28 -07003642 mddev_unlock(mddev);
NeilBrownca388052008-02-06 01:39:55 -08003643 }
3644 return rv;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003645}
3646
3647static void rdev_free(struct kobject *ko)
3648{
NeilBrown3cb03002011-10-11 16:45:26 +11003649 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003650 kfree(rdev);
3651}
Emese Revfy52cf25d2010-01-19 02:58:23 +01003652static const struct sysfs_ops rdev_sysfs_ops = {
NeilBrown86e6ffd2005-11-08 21:39:24 -08003653 .show = rdev_attr_show,
3654 .store = rdev_attr_store,
3655};
3656static struct kobj_type rdev_ktype = {
3657 .release = rdev_free,
3658 .sysfs_ops = &rdev_sysfs_ops,
3659 .default_attrs = rdev_default_attrs,
3660};
3661
NeilBrown3cb03002011-10-11 16:45:26 +11003662int md_rdev_init(struct md_rdev *rdev)
NeilBrowne8bb9a82010-06-01 19:37:26 +10003663{
3664 rdev->desc_nr = -1;
3665 rdev->saved_raid_disk = -1;
3666 rdev->raid_disk = -1;
3667 rdev->flags = 0;
3668 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10003669 rdev->new_data_offset = 0;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003670 rdev->sb_events = 0;
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02003671 rdev->last_read_error = 0;
NeilBrown2699b672011-07-28 11:31:47 +10003672 rdev->sb_loaded = 0;
3673 rdev->bb_page = NULL;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003674 atomic_set(&rdev->nr_pending, 0);
3675 atomic_set(&rdev->read_errors, 0);
3676 atomic_set(&rdev->corrected_errors, 0);
3677
3678 INIT_LIST_HEAD(&rdev->same_set);
3679 init_waitqueue_head(&rdev->blocked_wait);
NeilBrown2230dfe2011-07-28 11:31:46 +10003680
3681 /* Add space to store bad block list.
3682 * This reserves the space even on arrays where it cannot
3683 * be used - I wonder if that matters
3684 */
Vishal Vermafc974ee2015-12-24 19:20:34 -07003685 return badblocks_init(&rdev->badblocks, 0);
NeilBrowne8bb9a82010-06-01 19:37:26 +10003686}
3687EXPORT_SYMBOL_GPL(md_rdev_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688/*
3689 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3690 *
3691 * mark the device faulty if:
3692 *
3693 * - the device is nonexistent (zero size)
3694 * - the device has no valid superblock
3695 *
3696 * a faulty rdev _never_ has rdev->sb set.
3697 */
NeilBrown3cb03002011-10-11 16:45:26 +11003698static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699{
3700 char b[BDEVNAME_SIZE];
3701 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11003702 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 sector_t size;
3704
NeilBrown9ffae0c2006-01-06 00:20:32 -08003705 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
NeilBrown9d487392016-11-02 14:16:49 +11003706 if (!rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708
NeilBrown2230dfe2011-07-28 11:31:46 +10003709 err = md_rdev_init(rdev);
3710 if (err)
3711 goto abort_free;
3712 err = alloc_disk_sb(rdev);
3713 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714 goto abort_free;
3715
NeilBrownc5d79ad2008-02-06 01:39:54 -08003716 err = lock_rdev(rdev, newdev, super_format == -2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 if (err)
3718 goto abort_free;
3719
Greg Kroah-Hartmanf9cb0742007-12-17 23:05:35 -07003720 kobject_init(&rdev->kobj, &rdev_ktype);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003721
Mike Snitzer77304d22010-11-08 14:39:12 +01003722 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 if (!size) {
NeilBrown9d487392016-11-02 14:16:49 +11003724 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 bdevname(rdev->bdev,b));
3726 err = -EINVAL;
3727 goto abort_free;
3728 }
3729
3730 if (super_format >= 0) {
3731 err = super_types[super_format].
3732 load_super(rdev, NULL, super_minor);
3733 if (err == -EINVAL) {
NeilBrown9d487392016-11-02 14:16:49 +11003734 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
NeilBrowndf968c42007-07-17 04:06:11 -07003735 bdevname(rdev->bdev,b),
NeilBrown9d487392016-11-02 14:16:49 +11003736 super_format, super_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737 goto abort_free;
3738 }
3739 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11003740 pr_warn("md: could not read %s's sb, not importing!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 bdevname(rdev->bdev,b));
3742 goto abort_free;
3743 }
3744 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003745
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746 return rdev;
3747
3748abort_free:
NeilBrown2699b672011-07-28 11:31:47 +10003749 if (rdev->bdev)
3750 unlock_rdev(rdev);
NeilBrown545c8792012-05-22 13:54:30 +10003751 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003752 kfree(rdev);
3753 return ERR_PTR(err);
3754}
3755
3756/*
3757 * Check a full RAID array for plausibility
3758 */
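/*
 * Pick the device with the most recent superblock (the "freshest"), validate
 * the array-wide metadata against it, and kick out any device whose
 * superblock is stale or inconsistent with it.
 */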
3759
Yufen Yu6a5cb532019-10-16 16:00:03 +08003760static int analyze_sbs(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003761{
3762 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11003763 struct md_rdev *rdev, *freshest, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 char b[BDEVNAME_SIZE];
3765
3766 freshest = NULL;
NeilBrowndafb20f2012-03-19 12:46:39 +11003767 rdev_for_each_safe(rdev, tmp, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 switch (super_types[mddev->major_version].
3769 load_super(rdev, freshest, mddev->minor_version)) {
3770 case 1:
3771 freshest = rdev;
3772 break;
3773 case 0:
3774 break;
3775 default:
NeilBrown9d487392016-11-02 14:16:49 +11003776 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003778 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779 }
3780
Yufen Yu6a5cb532019-10-16 16:00:03 +08003781 /* Cannot find a valid fresh disk */
3782 if (!freshest) {
3783 pr_warn("md: cannot find a valid disk\n");
3784 return -EINVAL;
3785 }
3786
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787 super_types[mddev->major_version].
3788 validate_super(mddev, freshest);
3789
3790 i = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11003791 rdev_for_each_safe(rdev, tmp, mddev) {
NeilBrown233fca32010-04-14 17:02:09 +10003792 if (mddev->max_disks &&
3793 (rdev->desc_nr >= mddev->max_disks ||
3794 i > mddev->max_disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11003795 pr_warn("md: %s: %s: only %d devices permitted\n",
3796 mdname(mddev), bdevname(rdev->bdev, b),
3797 mddev->max_disks);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003798 md_kick_rdev_from_array(rdev);
NeilBrownde01dfa2009-02-06 18:02:46 +11003799 continue;
3800 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003801 if (rdev != freshest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802 if (super_types[mddev->major_version].
3803 validate_super(mddev, rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003804 pr_warn("md: kicking non-fresh %s from array!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003806 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807 continue;
3808 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810 if (mddev->level == LEVEL_MULTIPATH) {
3811 rdev->desc_nr = i++;
3812 rdev->raid_disk = rdev->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08003813 set_bit(In_sync, &rdev->flags);
Shaohua Lif2076e72015-10-08 21:54:12 -07003814 } else if (rdev->raid_disk >=
3815 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3816 !test_bit(Journal, &rdev->flags)) {
NeilBrowna778b732007-05-23 13:58:10 -07003817 rdev->raid_disk = -1;
3818 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819 }
3820 }
Yufen Yu6a5cb532019-10-16 16:00:03 +08003821
3822 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823}
3824
NeilBrown72e02072009-12-14 12:49:55 +11003825/* Read a fixed-point number.
3826 * Numbers in sysfs attributes should be in "standard" units where
3827 * possible, so time should be in seconds.
NeilBrownf72ffdd2014-09-30 14:23:59 +10003828 * However we internally use a much smaller unit such as
NeilBrown72e02072009-12-14 12:49:55 +11003829 * milliseconds or jiffies.
3830 * This function takes a decimal number with a possible fractional
3831 * component, and produces an integer which is the result of
 3832 * multiplying that number by 10^'scale',
 3833 * all without any floating-point arithmetic.
3834 */
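/* For example (illustrative): strict_strtoul_scaled("5.73", &res, 3) stores
 * 5730 in *res, and strict_strtoul_scaled("20", &res, 3) stores 20000.
 */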
3835int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3836{
3837 unsigned long result = 0;
3838 long decimals = -1;
3839 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3840 if (*cp == '.')
3841 decimals = 0;
3842 else if (decimals < scale) {
3843 unsigned int value;
3844 value = *cp - '0';
3845 result = result * 10 + value;
3846 if (decimals >= 0)
3847 decimals++;
3848 }
3849 cp++;
3850 }
3851 if (*cp == '\n')
3852 cp++;
3853 if (*cp)
3854 return -EINVAL;
3855 if (decimals < 0)
3856 decimals = 0;
Andy Shevchenkocf891602019-07-23 23:41:55 +03003857 *res = result * int_pow(10, scale - decimals);
NeilBrown72e02072009-12-14 12:49:55 +11003858 return 0;
3859}
3860
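/* 'safe_mode_delay' is shown and set in seconds with millisecond resolution
 * (e.g. "0.200"); internally it is stored in jiffies.  A value of 0 disables
 * the safe-mode delay.
 */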
NeilBrowneae17012005-11-08 21:39:23 -08003861static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003862safe_delay_show(struct mddev *mddev, char *page)
NeilBrown16f17b32006-06-26 00:27:37 -07003863{
3864 int msec = (mddev->safemode_delay*1000)/HZ;
3865 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3866}
3867static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003868safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
NeilBrown16f17b32006-06-26 00:27:37 -07003869{
NeilBrown16f17b32006-06-26 00:27:37 -07003870 unsigned long msec;
Dan Williams97ce0a72008-09-24 22:48:19 -07003871
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003872 if (mddev_is_clustered(mddev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003873 pr_warn("md: Safemode is disabled for clustered mode\n");
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003874 return -EINVAL;
3875 }
3876
NeilBrown72e02072009-12-14 12:49:55 +11003877 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
NeilBrown16f17b32006-06-26 00:27:37 -07003878 return -EINVAL;
NeilBrown16f17b32006-06-26 00:27:37 -07003879 if (msec == 0)
3880 mddev->safemode_delay = 0;
3881 else {
NeilBrown19052c02008-08-05 15:54:13 +10003882 unsigned long old_delay = mddev->safemode_delay;
NeilBrown1b30e662014-12-15 12:57:00 +11003883 unsigned long new_delay = (msec*HZ)/1000;
3884
3885 if (new_delay == 0)
3886 new_delay = 1;
3887 mddev->safemode_delay = new_delay;
3888 if (new_delay < old_delay || old_delay == 0)
3889 mod_timer(&mddev->safemode_timer, jiffies+1);
NeilBrown16f17b32006-06-26 00:27:37 -07003890 }
3891 return len;
3892}
3893static struct md_sysfs_entry md_safe_delay =
NeilBrown80ca3a42006-07-10 04:44:18 -07003894__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
NeilBrown16f17b32006-06-26 00:27:37 -07003895
3896static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003897level_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08003898{
NeilBrown36d091f2014-12-15 12:56:58 +11003899 struct md_personality *p;
3900 int ret;
3901 spin_lock(&mddev->lock);
3902 p = mddev->pers;
NeilBrownd9d166c2006-01-06 00:20:51 -08003903 if (p)
NeilBrown36d091f2014-12-15 12:56:58 +11003904 ret = sprintf(page, "%s\n", p->name);
NeilBrownd9d166c2006-01-06 00:20:51 -08003905 else if (mddev->clevel[0])
NeilBrown36d091f2014-12-15 12:56:58 +11003906 ret = sprintf(page, "%s\n", mddev->clevel);
NeilBrownd9d166c2006-01-06 00:20:51 -08003907 else if (mddev->level != LEVEL_NONE)
NeilBrown36d091f2014-12-15 12:56:58 +11003908 ret = sprintf(page, "%d\n", mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08003909 else
NeilBrown36d091f2014-12-15 12:56:58 +11003910 ret = 0;
3911 spin_unlock(&mddev->lock);
3912 return ret;
NeilBrowneae17012005-11-08 21:39:23 -08003913}
3914
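/* Writing 'level' to an active array attempts an online personality
 * takeover (e.g. raid5 -> raid6); on an inactive array it simply records
 * the level to use when the array is started.
 */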
NeilBrownd9d166c2006-01-06 00:20:51 -08003915static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003916level_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd9d166c2006-01-06 00:20:51 -08003917{
Dan Williamsf2859af2010-05-02 10:04:16 -07003918 char clevel[16];
NeilBrown67918752014-12-15 12:57:01 +11003919 ssize_t rv;
3920 size_t slen = len;
NeilBrowndb721d32014-12-15 12:56:58 +11003921 struct md_personality *pers, *oldpers;
Dan Williamsf2859af2010-05-02 10:04:16 -07003922 long level;
NeilBrowndb721d32014-12-15 12:56:58 +11003923 void *priv, *oldpriv;
NeilBrown3cb03002011-10-11 16:45:26 +11003924 struct md_rdev *rdev;
NeilBrown245f46c2009-03-31 14:39:39 +11003925
NeilBrown67918752014-12-15 12:57:01 +11003926 if (slen == 0 || slen >= sizeof(clevel))
3927 return -EINVAL;
3928
3929 rv = mddev_lock(mddev);
3930 if (rv)
NeilBrown245f46c2009-03-31 14:39:39 +11003931 return rv;
NeilBrown67918752014-12-15 12:57:01 +11003932
3933 if (mddev->pers == NULL) {
3934 strncpy(mddev->clevel, buf, slen);
3935 if (mddev->clevel[slen-1] == '\n')
3936 slen--;
3937 mddev->clevel[slen] = 0;
3938 mddev->level = LEVEL_NONE;
3939 rv = len;
3940 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003941 }
NeilBrown67918752014-12-15 12:57:01 +11003942 rv = -EROFS;
NeilBrownbd8839e2014-05-28 13:39:21 +10003943 if (mddev->ro)
NeilBrown67918752014-12-15 12:57:01 +11003944 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003945
3946 /* request to change the personality. Need to ensure:
3947 * - array is not engaged in resync/recovery/reshape
3948 * - old personality can be suspended
3949 * - new personality will access other array.
3950 */
3951
NeilBrown67918752014-12-15 12:57:01 +11003952 rv = -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10003953 if (mddev->sync_thread ||
NeilBrownf851b602014-12-11 10:02:10 +11003954 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
NeilBrownbb4f1e92010-08-08 21:18:03 +10003955 mddev->reshape_position != MaxSector ||
3956 mddev->sysfs_active)
NeilBrown67918752014-12-15 12:57:01 +11003957 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003958
NeilBrown67918752014-12-15 12:57:01 +11003959 rv = -EINVAL;
NeilBrown245f46c2009-03-31 14:39:39 +11003960 if (!mddev->pers->quiesce) {
NeilBrown9d487392016-11-02 14:16:49 +11003961 pr_warn("md: %s: %s does not support online personality change\n",
3962 mdname(mddev), mddev->pers->name);
NeilBrown67918752014-12-15 12:57:01 +11003963 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003964 }
3965
3966 /* Now find the new personality */
NeilBrown67918752014-12-15 12:57:01 +11003967 strncpy(clevel, buf, slen);
3968 if (clevel[slen-1] == '\n')
3969 slen--;
3970 clevel[slen] = 0;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003971 if (kstrtol(clevel, 10, &level))
Dan Williamsf2859af2010-05-02 10:04:16 -07003972 level = LEVEL_NONE;
NeilBrown245f46c2009-03-31 14:39:39 +11003973
Dan Williamsf2859af2010-05-02 10:04:16 -07003974 if (request_module("md-%s", clevel) != 0)
3975 request_module("md-level-%s", clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003976 spin_lock(&pers_lock);
Dan Williamsf2859af2010-05-02 10:04:16 -07003977 pers = find_pers(level, clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003978 if (!pers || !try_module_get(pers->owner)) {
3979 spin_unlock(&pers_lock);
NeilBrown9d487392016-11-02 14:16:49 +11003980 pr_warn("md: personality %s not loaded\n", clevel);
NeilBrown67918752014-12-15 12:57:01 +11003981 rv = -EINVAL;
3982 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003983 }
3984 spin_unlock(&pers_lock);
3985
3986 if (pers == mddev->pers) {
3987 /* Nothing to do! */
3988 module_put(pers->owner);
NeilBrown67918752014-12-15 12:57:01 +11003989 rv = len;
3990 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003991 }
3992 if (!pers->takeover) {
3993 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11003994 pr_warn("md: %s: %s does not support personality takeover\n",
3995 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11003996 rv = -EINVAL;
3997 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003998 }
3999
NeilBrowndafb20f2012-03-19 12:46:39 +11004000 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01004001 rdev->new_raid_disk = rdev->raid_disk;
4002
NeilBrown245f46c2009-03-31 14:39:39 +11004003 /* ->takeover must set new_* and/or delta_disks
4004 * if it succeeds, and may set them when it fails.
4005 */
4006 priv = pers->takeover(mddev);
4007 if (IS_ERR(priv)) {
4008 mddev->new_level = mddev->level;
4009 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10004010 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown245f46c2009-03-31 14:39:39 +11004011 mddev->raid_disks -= mddev->delta_disks;
4012 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10004013 mddev->reshape_backwards = 0;
NeilBrown245f46c2009-03-31 14:39:39 +11004014 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004015 pr_warn("md: %s: %s would not accept array\n",
4016 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004017 rv = PTR_ERR(priv);
4018 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004019 }
4020
4021 /* Looks like we have a winner */
4022 mddev_suspend(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11004023 mddev_detach(mddev);
NeilBrown36d091f2014-12-15 12:56:58 +11004024
4025 spin_lock(&mddev->lock);
NeilBrowndb721d32014-12-15 12:56:58 +11004026 oldpers = mddev->pers;
4027 oldpriv = mddev->private;
4028 mddev->pers = pers;
4029 mddev->private = priv;
4030 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4031 mddev->level = mddev->new_level;
4032 mddev->layout = mddev->new_layout;
4033 mddev->chunk_sectors = mddev->new_chunk_sectors;
4034 mddev->delta_disks = 0;
4035 mddev->reshape_backwards = 0;
4036 mddev->degraded = 0;
NeilBrown36d091f2014-12-15 12:56:58 +11004037 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10004038
NeilBrowndb721d32014-12-15 12:56:58 +11004039 if (oldpers->sync_request == NULL &&
Trela Maciej54071b32010-03-08 16:02:42 +11004040 mddev->external) {
4041 /* We are converting from a no-redundancy array
4042 * to a redundancy array and metadata is managed
4043 * externally so we need to be sure that writes
4044 * won't block due to a need to transition
4045 * clean->dirty
4046 * until external management is started.
4047 */
4048 mddev->in_sync = 0;
4049 mddev->safemode_delay = 0;
4050 mddev->safemode = 0;
4051 }
4052
NeilBrowndb721d32014-12-15 12:56:58 +11004053 oldpers->free(mddev, oldpriv);
4054
4055 if (oldpers->sync_request == NULL &&
4056 pers->sync_request != NULL) {
4057 /* need to add the md_redundancy_group */
4058 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11004059 pr_warn("md: cannot register extra attributes for %s\n",
4060 mdname(mddev));
NeilBrowndb721d32014-12-15 12:56:58 +11004061 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07004062 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4063 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrowndb721d32014-12-15 12:56:58 +11004064 }
4065 if (oldpers->sync_request != NULL &&
4066 pers->sync_request == NULL) {
4067 /* need to remove the md_redundancy_group */
4068 if (mddev->to_remove == NULL)
4069 mddev->to_remove = &md_redundancy_group;
4070 }
4071
Alexey Obitotskiy4cb9da72016-06-23 12:11:01 +02004072 module_put(oldpers->owner);
4073
NeilBrowndafb20f2012-03-19 12:46:39 +11004074 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004075 if (rdev->raid_disk < 0)
4076 continue;
NeilBrownbf2cb0d2011-01-14 09:14:34 +11004077 if (rdev->new_raid_disk >= mddev->raid_disks)
NeilBrowne93f68a2010-06-15 09:36:03 +01004078 rdev->new_raid_disk = -1;
4079 if (rdev->new_raid_disk == rdev->raid_disk)
4080 continue;
Namhyung Kim36fad852011-07-27 11:00:36 +10004081 sysfs_unlink_rdev(mddev, rdev);
NeilBrowne93f68a2010-06-15 09:36:03 +01004082 }
NeilBrowndafb20f2012-03-19 12:46:39 +11004083 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004084 if (rdev->raid_disk < 0)
4085 continue;
4086 if (rdev->new_raid_disk == rdev->raid_disk)
4087 continue;
4088 rdev->raid_disk = rdev->new_raid_disk;
4089 if (rdev->raid_disk < 0)
NeilBrown3a981b02009-08-03 10:59:55 +10004090 clear_bit(In_sync, &rdev->flags);
NeilBrowne93f68a2010-06-15 09:36:03 +01004091 else {
Namhyung Kim36fad852011-07-27 11:00:36 +10004092 if (sysfs_link_rdev(mddev, rdev))
NeilBrown9d487392016-11-02 14:16:49 +11004093 pr_warn("md: cannot register rd%d for %s after level change\n",
4094 rdev->raid_disk, mdname(mddev));
NeilBrown3a981b02009-08-03 10:59:55 +10004095 }
NeilBrowne93f68a2010-06-15 09:36:03 +01004096 }
4097
NeilBrowndb721d32014-12-15 12:56:58 +11004098 if (pers->sync_request == NULL) {
Trela, Maciej9af204c2010-03-08 16:02:44 +11004099 /* this is now an array without redundancy, so
4100 * it must always be in_sync
4101 */
4102 mddev->in_sync = 1;
4103 del_timer_sync(&mddev->safemode_timer);
4104 }
NeilBrown02e5f5c2013-11-14 15:16:15 +11004105 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown245f46c2009-03-31 14:39:39 +11004106 pers->run(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004107 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassow47525e52012-05-22 13:55:29 +10004108 mddev_resume(mddev);
NeilBrown830778a2014-01-14 15:17:03 +11004109 if (!mddev->thread)
4110 md_update_sb(mddev, 1);
Junxiao Bie1a86db2020-07-14 16:10:26 -07004111 sysfs_notify_dirent_safe(mddev->sysfs_level);
Dan Williamsbb7f8d22010-05-01 18:14:57 -07004112 md_new_event(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004113 rv = len;
4114out_unlock:
4115 mddev_unlock(mddev);
NeilBrownd9d166c2006-01-06 00:20:51 -08004116 return rv;
4117}
4118
4119static struct md_sysfs_entry md_level =
NeilBrown80ca3a42006-07-10 04:44:18 -07004120__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
NeilBrowneae17012005-11-08 21:39:23 -08004121
NeilBrownd4dbd022006-06-26 00:27:59 -07004122static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004123layout_show(struct mddev *mddev, char *page)
NeilBrownd4dbd022006-06-26 00:27:59 -07004124{
4125 /* just a number, not meaningful for all levels */
NeilBrown08a02ec2007-05-09 02:35:38 -07004126 if (mddev->reshape_position != MaxSector &&
4127 mddev->layout != mddev->new_layout)
4128 return sprintf(page, "%d (%d)\n",
4129 mddev->new_layout, mddev->layout);
NeilBrownd4dbd022006-06-26 00:27:59 -07004130 return sprintf(page, "%d\n", mddev->layout);
4131}
4132
4133static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004134layout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd4dbd022006-06-26 00:27:59 -07004135{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004136 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004137 int err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004138
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004139 err = kstrtouint(buf, 10, &n);
4140 if (err < 0)
4141 return err;
NeilBrown67918752014-12-15 12:57:01 +11004142 err = mddev_lock(mddev);
4143 if (err)
4144 return err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004145
NeilBrownb3546032009-03-31 14:56:41 +11004146 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004147 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004148 err = -EBUSY;
4149 else if (mddev->ro)
4150 err = -EROFS;
4151 else {
4152 mddev->new_layout = n;
4153 err = mddev->pers->check_reshape(mddev);
4154 if (err)
4155 mddev->new_layout = mddev->layout;
NeilBrown597a7112009-06-18 08:47:42 +10004156 }
NeilBrownb3546032009-03-31 14:56:41 +11004157 } else {
NeilBrown08a02ec2007-05-09 02:35:38 -07004158 mddev->new_layout = n;
NeilBrownb3546032009-03-31 14:56:41 +11004159 if (mddev->reshape_position == MaxSector)
4160 mddev->layout = n;
4161 }
NeilBrown67918752014-12-15 12:57:01 +11004162 mddev_unlock(mddev);
4163 return err ?: len;
NeilBrownd4dbd022006-06-26 00:27:59 -07004164}
4165static struct md_sysfs_entry md_layout =
NeilBrown80ca3a42006-07-10 04:44:18 -07004166__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
NeilBrownd4dbd022006-06-26 00:27:59 -07004167
NeilBrowneae17012005-11-08 21:39:23 -08004168static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004169raid_disks_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08004170{
NeilBrownbb636542005-11-08 21:39:45 -08004171 if (mddev->raid_disks == 0)
4172 return 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004173 if (mddev->reshape_position != MaxSector &&
4174 mddev->delta_disks != 0)
4175 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4176 mddev->raid_disks - mddev->delta_disks);
NeilBrowneae17012005-11-08 21:39:23 -08004177 return sprintf(page, "%d\n", mddev->raid_disks);
4178}
4179
NeilBrownfd01b882011-10-11 16:47:53 +11004180static int update_raid_disks(struct mddev *mddev, int raid_disks);
NeilBrownda943b992006-01-06 00:20:54 -08004181
4182static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004183raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownda943b992006-01-06 00:20:54 -08004184{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004185 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004186 int err;
NeilBrownda943b992006-01-06 00:20:54 -08004187
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004188 err = kstrtouint(buf, 10, &n);
4189 if (err < 0)
4190 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004191
NeilBrown67918752014-12-15 12:57:01 +11004192 err = mddev_lock(mddev);
4193 if (err)
4194 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004195 if (mddev->pers)
NeilBrown67918752014-12-15 12:57:01 +11004196 err = update_raid_disks(mddev, n);
NeilBrown08a02ec2007-05-09 02:35:38 -07004197 else if (mddev->reshape_position != MaxSector) {
NeilBrownc6563a82012-05-21 09:27:00 +10004198 struct md_rdev *rdev;
NeilBrown08a02ec2007-05-09 02:35:38 -07004199 int olddisks = mddev->raid_disks - mddev->delta_disks;
NeilBrownc6563a82012-05-21 09:27:00 +10004200
NeilBrown67918752014-12-15 12:57:01 +11004201 err = -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10004202 rdev_for_each(rdev, mddev) {
4203 if (olddisks < n &&
4204 rdev->data_offset < rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004205 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004206 if (olddisks > n &&
4207 rdev->data_offset > rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004208 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004209 }
NeilBrown67918752014-12-15 12:57:01 +11004210 err = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004211 mddev->delta_disks = n - olddisks;
4212 mddev->raid_disks = n;
NeilBrown2c810cd2012-05-21 09:27:00 +10004213 mddev->reshape_backwards = (mddev->delta_disks < 0);
NeilBrown08a02ec2007-05-09 02:35:38 -07004214 } else
NeilBrownda943b992006-01-06 00:20:54 -08004215 mddev->raid_disks = n;
NeilBrown67918752014-12-15 12:57:01 +11004216out_unlock:
4217 mddev_unlock(mddev);
4218 return err ? err : len;
NeilBrownda943b992006-01-06 00:20:54 -08004219}
4220static struct md_sysfs_entry md_raid_disks =
NeilBrown80ca3a42006-07-10 04:44:18 -07004221__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
NeilBrowneae17012005-11-08 21:39:23 -08004222
NeilBrown24dd4692005-11-08 21:39:26 -08004223static ssize_t
Sebastian Parschauerec164d072020-07-28 12:01:39 +02004224uuid_show(struct mddev *mddev, char *page)
4225{
4226 return sprintf(page, "%pU\n", mddev->uuid);
4227}
4228static struct md_sysfs_entry md_uuid =
4229__ATTR(uuid, S_IRUGO, uuid_show, NULL);
4230
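/* 'chunk_size' is shown and set in bytes; internally md stores it in
 * 512-byte sectors (chunk_sectors).
 */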
4231static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004232chunk_size_show(struct mddev *mddev, char *page)
NeilBrown3b343802006-01-06 00:20:47 -08004233{
NeilBrown08a02ec2007-05-09 02:35:38 -07004234 if (mddev->reshape_position != MaxSector &&
Andre Noll664e7c42009-06-18 08:45:27 +10004235 mddev->chunk_sectors != mddev->new_chunk_sectors)
4236 return sprintf(page, "%d (%d)\n",
4237 mddev->new_chunk_sectors << 9,
Andre Noll9d8f0362009-06-18 08:45:01 +10004238 mddev->chunk_sectors << 9);
4239 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
NeilBrown3b343802006-01-06 00:20:47 -08004240}
4241
4242static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004243chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown3b343802006-01-06 00:20:47 -08004244{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004245 unsigned long n;
NeilBrown67918752014-12-15 12:57:01 +11004246 int err;
NeilBrown3b343802006-01-06 00:20:47 -08004247
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004248 err = kstrtoul(buf, 10, &n);
4249 if (err < 0)
4250 return err;
NeilBrown3b343802006-01-06 00:20:47 -08004251
NeilBrown67918752014-12-15 12:57:01 +11004252 err = mddev_lock(mddev);
4253 if (err)
4254 return err;
NeilBrownb3546032009-03-31 14:56:41 +11004255 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004256 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004257 err = -EBUSY;
4258 else if (mddev->ro)
4259 err = -EROFS;
4260 else {
4261 mddev->new_chunk_sectors = n >> 9;
4262 err = mddev->pers->check_reshape(mddev);
4263 if (err)
4264 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown597a7112009-06-18 08:47:42 +10004265 }
NeilBrownb3546032009-03-31 14:56:41 +11004266 } else {
Andre Noll664e7c42009-06-18 08:45:27 +10004267 mddev->new_chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004268 if (mddev->reshape_position == MaxSector)
Andre Noll9d8f0362009-06-18 08:45:01 +10004269 mddev->chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004270 }
NeilBrown67918752014-12-15 12:57:01 +11004271 mddev_unlock(mddev);
4272 return err ?: len;
NeilBrown3b343802006-01-06 00:20:47 -08004273}
4274static struct md_sysfs_entry md_chunk_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004275__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
NeilBrown3b343802006-01-06 00:20:47 -08004276
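/* 'resync_start' reports the resync checkpoint (recovery_cp); "none" means
 * the whole array is considered in-sync.  On an active array it can only be
 * changed while resync is frozen.
 */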
NeilBrowna94213b2006-06-26 00:28:00 -07004277static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004278resync_start_show(struct mddev *mddev, char *page)
NeilBrowna94213b2006-06-26 00:28:00 -07004279{
NeilBrownd1a7c502009-03-31 15:24:32 +11004280 if (mddev->recovery_cp == MaxSector)
4281 return sprintf(page, "none\n");
NeilBrowna94213b2006-06-26 00:28:00 -07004282 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4283}
4284
4285static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004286resync_start_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna94213b2006-06-26 00:28:00 -07004287{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004288 unsigned long long n;
NeilBrown67918752014-12-15 12:57:01 +11004289 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004290
4291 if (cmd_match(buf, "none"))
4292 n = MaxSector;
4293 else {
4294 err = kstrtoull(buf, 10, &n);
4295 if (err < 0)
4296 return err;
4297 if (n != (sector_t)n)
4298 return -EINVAL;
4299 }
NeilBrowna94213b2006-06-26 00:28:00 -07004300
NeilBrown67918752014-12-15 12:57:01 +11004301 err = mddev_lock(mddev);
4302 if (err)
4303 return err;
NeilBrownb0986362011-05-11 15:52:21 +10004304 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrown67918752014-12-15 12:57:01 +11004305 err = -EBUSY;
NeilBrowna94213b2006-06-26 00:28:00 -07004306
NeilBrown67918752014-12-15 12:57:01 +11004307 if (!err) {
4308 mddev->recovery_cp = n;
4309 if (mddev->pers)
Shaohua Li29530792016-12-08 15:48:19 -08004310 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
NeilBrown67918752014-12-15 12:57:01 +11004311 }
4312 mddev_unlock(mddev);
4313 return err ?: len;
NeilBrowna94213b2006-06-26 00:28:00 -07004314}
4315static struct md_sysfs_entry md_resync_start =
NeilBrown750f1992014-09-30 08:53:05 +10004316__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4317 resync_start_show, resync_start_store);
NeilBrowna94213b2006-06-26 00:28:00 -07004318
NeilBrown9e653b62006-06-26 00:27:58 -07004319/*
4320 * The array state can be:
4321 *
4322 * clear
4323 * No devices, no size, no level
4324 * Equivalent to STOP_ARRAY ioctl
4325 * inactive
4326 * May have some settings, but array is not active
4327 * all IO results in error
4328 * When written, doesn't tear down array, but just stops it
4329 * suspended (not supported yet)
4330 * All IO requests will block. The array can be reconfigured.
Andre Noll910d8cb2008-03-25 21:00:53 +01004331 * Writing this, if accepted, will block until array is quiescent
NeilBrown9e653b62006-06-26 00:27:58 -07004332 * readonly
4333 * no resync can happen. no superblocks get written.
4334 * write requests fail
4335 * read-auto
4336 * like readonly, but behaves like 'clean' on a write request.
4337 *
4338 * clean - no pending writes, but otherwise active.
4339 * When written to inactive array, starts without resync
4340 * If a write request arrives then
4341 * if metadata is known, mark 'dirty' and switch to 'active'.
4342 * if not known, block and switch to write-pending
4343 * If written to an active array that has pending writes, then fails.
4344 * active
4345 * fully active: IO and resync can be happening.
4346 * When written to inactive array, starts with resync
4347 *
4348 * write-pending
4349 * clean, but writes are blocked waiting for 'active' to be written.
4350 *
4351 * active-idle
4352 * like active, but no writes have been seen for a while (100msec).
4353 *
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004354 * broken
4355 * RAID0/LINEAR-only: same as clean, but array is missing a member.
 4356 * It's useful because RAID0/LINEAR mounted arrays aren't stopped
4357 * when a member is gone, so this state will at least alert the
4358 * user that something is wrong.
NeilBrown9e653b62006-06-26 00:27:58 -07004359 */
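/* Illustrative usage (a sketch; path assumes a typical sysfs layout):
 *   cat /sys/block/md0/md/array_state
 *   echo readonly > /sys/block/md0/md/array_state
 */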
4360enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004361 write_pending, active_idle, broken, bad_word};
Adrian Bunk05381952006-06-26 00:28:01 -07004362static char *array_states[] = {
NeilBrown9e653b62006-06-26 00:27:58 -07004363 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004364 "write-pending", "active-idle", "broken", NULL };
NeilBrown9e653b62006-06-26 00:27:58 -07004365
4366static int match_word(const char *word, char **list)
4367{
4368 int n;
4369 for (n=0; list[n]; n++)
4370 if (cmd_match(word, list[n]))
4371 break;
4372 return n;
4373}
4374
4375static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004376array_state_show(struct mddev *mddev, char *page)
NeilBrown9e653b62006-06-26 00:27:58 -07004377{
4378 enum array_state st = inactive;
4379
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004380 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
NeilBrown9e653b62006-06-26 00:27:58 -07004381 switch(mddev->ro) {
4382 case 1:
4383 st = readonly;
4384 break;
4385 case 2:
4386 st = read_auto;
4387 break;
4388 case 0:
NeilBrown55cc39f2017-03-15 14:05:14 +11004389 spin_lock(&mddev->lock);
Shaohua Li29530792016-12-08 15:48:19 -08004390 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrowne6910632008-02-06 01:39:51 -08004391 st = write_pending;
Tomasz Majchrzak16f88942016-10-24 12:47:28 +02004392 else if (mddev->in_sync)
4393 st = clean;
NeilBrown9e653b62006-06-26 00:27:58 -07004394 else if (mddev->safemode)
4395 st = active_idle;
4396 else
4397 st = active;
NeilBrown55cc39f2017-03-15 14:05:14 +11004398 spin_unlock(&mddev->lock);
NeilBrown9e653b62006-06-26 00:27:58 -07004399 }
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004400
4401 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4402 st = broken;
4403 } else {
NeilBrown9e653b62006-06-26 00:27:58 -07004404 if (list_empty(&mddev->disks) &&
4405 mddev->raid_disks == 0 &&
Andre Noll58c0fed2009-03-31 14:33:13 +11004406 mddev->dev_sectors == 0)
NeilBrown9e653b62006-06-26 00:27:58 -07004407 st = clear;
4408 else
4409 st = inactive;
4410 }
4411 return sprintf(page, "%s\n", array_states[st]);
4412}
4413
NeilBrownf72ffdd2014-09-30 14:23:59 +10004414static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4415static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
NeilBrownfd01b882011-10-11 16:47:53 +11004416static int restart_array(struct mddev *mddev);
NeilBrown9e653b62006-06-26 00:27:58 -07004417
4418static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004419array_state_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown9e653b62006-06-26 00:27:58 -07004420{
NeilBrown6497709b2017-03-15 14:05:14 +11004421 int err = 0;
NeilBrown9e653b62006-06-26 00:27:58 -07004422 enum array_state st = match_word(buf, array_states);
NeilBrown67918752014-12-15 12:57:01 +11004423
4424 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4425 /* don't take reconfig_mutex when toggling between
4426 * clean and active
4427 */
4428 spin_lock(&mddev->lock);
4429 if (st == active) {
4430 restart_array(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004431 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
Tomasz Majchrzak91a6c4a2016-10-25 17:07:08 +02004432 md_wakeup_thread(mddev->thread);
NeilBrown67918752014-12-15 12:57:01 +11004433 wake_up(&mddev->sb_wait);
NeilBrown67918752014-12-15 12:57:01 +11004434 } else /* st == clean */ {
4435 restart_array(mddev);
NeilBrown6497709b2017-03-15 14:05:14 +11004436 if (!set_in_sync(mddev))
NeilBrown67918752014-12-15 12:57:01 +11004437 err = -EBUSY;
4438 }
Tomasz Majchrzak573275b2016-06-30 10:47:09 +02004439 if (!err)
4440 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown67918752014-12-15 12:57:01 +11004441 spin_unlock(&mddev->lock);
NeilBrownc008f1d2015-06-12 19:46:44 +10004442 return err ?: len;
NeilBrown67918752014-12-15 12:57:01 +11004443 }
4444 err = mddev_lock(mddev);
4445 if (err)
4446 return err;
4447 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004448 switch(st) {
4449 case bad_word:
4450 break;
4451 case clear:
4452 /* stopping an active array */
NeilBrowna05b7ea2012-07-19 15:59:18 +10004453 err = do_md_stop(mddev, 0, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004454 break;
4455 case inactive:
4456 /* stopping an active array */
NeilBrown90cf1952012-07-31 10:04:55 +10004457 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004458 err = do_md_stop(mddev, 2, NULL);
NeilBrown90cf1952012-07-31 10:04:55 +10004459 else
NeilBrowne6910632008-02-06 01:39:51 -08004460 err = 0; /* already inactive */
NeilBrown9e653b62006-06-26 00:27:58 -07004461 break;
4462 case suspended:
4463 break; /* not supported yet */
4464 case readonly:
4465 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004466 err = md_set_readonly(mddev, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004467 else {
4468 mddev->ro = 1;
NeilBrown648b6292008-04-30 00:52:30 -07004469 set_disk_ro(mddev->gendisk, 1);
NeilBrown9e653b62006-06-26 00:27:58 -07004470 err = do_md_run(mddev);
4471 }
4472 break;
4473 case read_auto:
NeilBrown9e653b62006-06-26 00:27:58 -07004474 if (mddev->pers) {
NeilBrown80268ee2008-10-13 11:55:12 +11004475 if (mddev->ro == 0)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004476 err = md_set_readonly(mddev, NULL);
NeilBrown80268ee2008-10-13 11:55:12 +11004477 else if (mddev->ro == 1)
NeilBrown648b6292008-04-30 00:52:30 -07004478 err = restart_array(mddev);
4479 if (err == 0) {
4480 mddev->ro = 2;
4481 set_disk_ro(mddev->gendisk, 0);
4482 }
NeilBrown9e653b62006-06-26 00:27:58 -07004483 } else {
4484 mddev->ro = 2;
4485 err = do_md_run(mddev);
4486 }
4487 break;
4488 case clean:
4489 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004490 err = restart_array(mddev);
4491 if (err)
4492 break;
NeilBrown85572d72014-12-15 12:56:56 +11004493 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11004494 if (!set_in_sync(mddev))
NeilBrowne6910632008-02-06 01:39:51 -08004495 err = -EBUSY;
NeilBrown85572d72014-12-15 12:56:56 +11004496 spin_unlock(&mddev->lock);
NeilBrown5bf29592009-05-07 12:50:57 +10004497 } else
4498 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004499 break;
4500 case active:
4501 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004502 err = restart_array(mddev);
4503 if (err)
4504 break;
Shaohua Li29530792016-12-08 15:48:19 -08004505 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown9e653b62006-06-26 00:27:58 -07004506 wake_up(&mddev->sb_wait);
4507 err = 0;
4508 } else {
4509 mddev->ro = 0;
NeilBrown648b6292008-04-30 00:52:30 -07004510 set_disk_ro(mddev->gendisk, 0);
NeilBrown9e653b62006-06-26 00:27:58 -07004511 err = do_md_run(mddev);
4512 }
4513 break;
4514 case write_pending:
4515 case active_idle:
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004516 case broken:
NeilBrown9e653b62006-06-26 00:27:58 -07004517 /* these cannot be set */
4518 break;
4519 }
NeilBrown67918752014-12-15 12:57:01 +11004520
4521 if (!err) {
NeilBrown1d23f172011-12-08 15:49:12 +11004522 if (mddev->hold_active == UNTIL_IOCTL)
4523 mddev->hold_active = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10004524 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10004525 }
NeilBrown67918752014-12-15 12:57:01 +11004526 mddev_unlock(mddev);
4527 return err ?: len;
NeilBrown9e653b62006-06-26 00:27:58 -07004528}
NeilBrown80ca3a42006-07-10 04:44:18 -07004529static struct md_sysfs_entry md_array_state =
NeilBrown750f1992014-09-30 08:53:05 +10004530__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
NeilBrown9e653b62006-06-26 00:27:58 -07004531
NeilBrown6d7ff7382006-01-06 00:21:16 -08004532static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004533max_corrected_read_errors_show(struct mddev *mddev, char *page) {
Robert Becker1e509152009-12-14 12:49:58 +11004534 return sprintf(page, "%d\n",
4535 atomic_read(&mddev->max_corr_read_errors));
4536}
4537
4538static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004539max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
Robert Becker1e509152009-12-14 12:49:58 +11004540{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004541 unsigned int n;
4542 int rv;
Robert Becker1e509152009-12-14 12:49:58 +11004543
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004544 rv = kstrtouint(buf, 10, &n);
4545 if (rv < 0)
4546 return rv;
4547 atomic_set(&mddev->max_corr_read_errors, n);
4548 return len;
Robert Becker1e509152009-12-14 12:49:58 +11004549}
4550
4551static struct md_sysfs_entry max_corr_read_errors =
4552__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4553 max_corrected_read_errors_store);
4554
4555static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004556null_show(struct mddev *mddev, char *page)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004557{
4558 return -EINVAL;
4559}
4560
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004561/* need to ensure rdev_delayed_delete() has completed */
4562static void flush_rdev_wq(struct mddev *mddev)
4563{
4564 struct md_rdev *rdev;
4565
4566 rcu_read_lock();
4567 rdev_for_each_rcu(rdev, mddev)
4568 if (work_pending(&rdev->del_work)) {
4569 flush_workqueue(md_rdev_misc_wq);
4570 break;
4571 }
4572 rcu_read_unlock();
4573}
4574
NeilBrown6d7ff7382006-01-06 00:21:16 -08004575static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004576new_dev_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004577{
4578 /* buf must be %d:%d\n? giving major and minor numbers */
4579 /* The new device is added to the array.
4580 * If the array has a persistent superblock, we read the
4581 * superblock to initialise info and check validity.
 4582 * Otherwise, the only checking done is that in bind_rdev_to_array,
4583 * which mainly checks size.
4584 */
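	/* Illustrative usage (sketch): "echo 8:16 > new_dev" in this array's
	 * md sysfs directory adds the device with major:minor 8:16.
	 */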
4585 char *e;
4586 int major = simple_strtoul(buf, &e, 10);
4587 int minor;
4588 dev_t dev;
NeilBrown3cb03002011-10-11 16:45:26 +11004589 struct md_rdev *rdev;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004590 int err;
4591
4592 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4593 return -EINVAL;
4594 minor = simple_strtoul(e+1, &e, 10);
4595 if (*e && *e != '\n')
4596 return -EINVAL;
4597 dev = MKDEV(major, minor);
4598 if (major != MAJOR(dev) ||
4599 minor != MINOR(dev))
4600 return -EOVERFLOW;
4601
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004602 flush_rdev_wq(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004603 err = mddev_lock(mddev);
4604 if (err)
4605 return err;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004606 if (mddev->persistent) {
4607 rdev = md_import_device(dev, mddev->major_version,
4608 mddev->minor_version);
4609 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11004610 struct md_rdev *rdev0
4611 = list_entry(mddev->disks.next,
4612 struct md_rdev, same_set);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004613 err = super_types[mddev->major_version]
4614 .load_super(rdev, rdev0, mddev->minor_version);
4615 if (err < 0)
4616 goto out;
4617 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08004618 } else if (mddev->external)
4619 rdev = md_import_device(dev, -2, -1);
4620 else
NeilBrown6d7ff7382006-01-06 00:21:16 -08004621 rdev = md_import_device(dev, -1, -1);
4622
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004623 if (IS_ERR(rdev)) {
4624 mddev_unlock(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004625 return PTR_ERR(rdev);
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004626 }
NeilBrown6d7ff7382006-01-06 00:21:16 -08004627 err = bind_rdev_to_array(rdev, mddev);
4628 out:
4629 if (err)
4630 export_rdev(rdev);
NeilBrown67918752014-12-15 12:57:01 +11004631 mddev_unlock(mddev);
Alexey Obitotskiy5492c462017-07-28 15:49:25 +02004632 if (!err)
4633 md_new_event(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004634 return err ? err : len;
4635}
4636
4637static struct md_sysfs_entry md_new_device =
NeilBrown80ca3a42006-07-10 04:44:18 -07004638__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
NeilBrown3b343802006-01-06 00:20:47 -08004639
4640static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004641bitmap_store(struct mddev *mddev, const char *buf, size_t len)
Paul Clements9b1d1da2006-10-03 01:15:49 -07004642{
4643 char *end;
4644 unsigned long chunk, end_chunk;
NeilBrown67918752014-12-15 12:57:01 +11004645 int err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004646
NeilBrown67918752014-12-15 12:57:01 +11004647 err = mddev_lock(mddev);
4648 if (err)
4649 return err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004650 if (!mddev->bitmap)
4651 goto out;
4652 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4653 while (*buf) {
4654 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4655 if (buf == end) break;
4656 if (*end == '-') { /* range */
4657 buf = end + 1;
4658 end_chunk = simple_strtoul(buf, &end, 0);
4659 if (buf == end) break;
4660 }
4661 if (*end && !isspace(*end)) break;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004662 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
André Goddard Rosae7d28602009-12-14 18:01:06 -08004663 buf = skip_spaces(end);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004664 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004665 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
Paul Clements9b1d1da2006-10-03 01:15:49 -07004666out:
NeilBrown67918752014-12-15 12:57:01 +11004667 mddev_unlock(mddev);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004668 return len;
4669}
4670
4671static struct md_sysfs_entry md_bitmap =
4672__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
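/*
 * Usage sketch: writing "10 20 30-40" to bitmap_set_bits marks chunks 10,
 * 20 and 30 through 40 dirty in the in-memory bitmap; md_bitmap_unplug()
 * then flushes the updated bits to disk.
 */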
4673
4674static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004675size_show(struct mddev *mddev, char *page)
NeilBrowna35b0d62006-01-06 00:20:49 -08004676{
Andre Noll58c0fed2009-03-31 14:33:13 +11004677 return sprintf(page, "%llu\n",
4678 (unsigned long long)mddev->dev_sectors / 2);
NeilBrowna35b0d62006-01-06 00:20:49 -08004679}
4680
NeilBrownfd01b882011-10-11 16:47:53 +11004681static int update_size(struct mddev *mddev, sector_t num_sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004682
4683static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004684size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna35b0d62006-01-06 00:20:49 -08004685{
4686 /* If array is inactive, we can reduce the component size, but
4687 * not increase it (except from 0).
4688 * If array is active, we can try an on-line resize
4689 */
Dan Williamsb522adc2009-03-31 15:00:31 +11004690 sector_t sectors;
4691 int err = strict_blocks_to_sectors(buf, &sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004692
Andre Noll58c0fed2009-03-31 14:33:13 +11004693 if (err < 0)
4694 return err;
NeilBrown67918752014-12-15 12:57:01 +11004695 err = mddev_lock(mddev);
4696 if (err)
4697 return err;
NeilBrowna35b0d62006-01-06 00:20:49 -08004698 if (mddev->pers) {
Andre Noll58c0fed2009-03-31 14:33:13 +11004699 err = update_size(mddev, sectors);
Xiao Ni4ba1e782016-06-12 17:18:00 +08004700 if (err == 0)
4701 md_update_sb(mddev, 1);
NeilBrowna35b0d62006-01-06 00:20:49 -08004702 } else {
Andre Noll58c0fed2009-03-31 14:33:13 +11004703 if (mddev->dev_sectors == 0 ||
4704 mddev->dev_sectors > sectors)
4705 mddev->dev_sectors = sectors;
NeilBrowna35b0d62006-01-06 00:20:49 -08004706 else
4707 err = -ENOSPC;
4708 }
NeilBrown67918752014-12-15 12:57:01 +11004709 mddev_unlock(mddev);
NeilBrowna35b0d62006-01-06 00:20:49 -08004710 return err ? err : len;
4711}
4712
4713static struct md_sysfs_entry md_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004714__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
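/*
 * Note: component_size is shown and parsed in KiB (dev_sectors / 2).
 * Writing a value while the array is active attempts an on-line resize
 * via update_size(); on an inactive array the size can only be reduced,
 * or set for the first time from 0.
 */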
NeilBrowna35b0d62006-01-06 00:20:49 -08004715
Masanari Iida83f0d772012-10-30 00:18:08 +09004716/* Metadata version.
NeilBrowne6910632008-02-06 01:39:51 -08004717 * This is one of
4718 * 'none' for arrays with no metadata (good luck...)
4719 * 'external' for arrays with externally managed metadata,
NeilBrown8bb93aa2006-01-06 00:20:50 -08004720 * or N.M for internally known formats
4721 */
4722static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004723metadata_show(struct mddev *mddev, char *page)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004724{
4725 if (mddev->persistent)
4726 return sprintf(page, "%d.%d\n",
4727 mddev->major_version, mddev->minor_version);
NeilBrowne6910632008-02-06 01:39:51 -08004728 else if (mddev->external)
4729 return sprintf(page, "external:%s\n", mddev->metadata_type);
NeilBrown8bb93aa2006-01-06 00:20:50 -08004730 else
4731 return sprintf(page, "none\n");
4732}
4733
4734static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004735metadata_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004736{
4737 int major, minor;
4738 char *e;
NeilBrown67918752014-12-15 12:57:01 +11004739 int err;
NeilBrownea43ddd2008-10-13 11:55:11 +11004740 /* Changing the details of 'external' metadata is
4741 * always permitted. Otherwise there must be
4742 * no devices attached to the array.
4743 */
NeilBrown67918752014-12-15 12:57:01 +11004744
4745 err = mddev_lock(mddev);
4746 if (err)
4747 return err;
4748 err = -EBUSY;
NeilBrownea43ddd2008-10-13 11:55:11 +11004749 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4750 ;
4751 else if (!list_empty(&mddev->disks))
NeilBrown67918752014-12-15 12:57:01 +11004752 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004753
NeilBrown67918752014-12-15 12:57:01 +11004754 err = 0;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004755 if (cmd_match(buf, "none")) {
4756 mddev->persistent = 0;
NeilBrowne6910632008-02-06 01:39:51 -08004757 mddev->external = 0;
4758 mddev->major_version = 0;
4759 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004760 goto out_unlock;
NeilBrowne6910632008-02-06 01:39:51 -08004761 }
4762 if (strncmp(buf, "external:", 9) == 0) {
NeilBrown20a49ff2008-02-06 01:39:57 -08004763 size_t namelen = len-9;
NeilBrowne6910632008-02-06 01:39:51 -08004764 if (namelen >= sizeof(mddev->metadata_type))
4765 namelen = sizeof(mddev->metadata_type)-1;
4766 strncpy(mddev->metadata_type, buf+9, namelen);
4767 mddev->metadata_type[namelen] = 0;
4768 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4769 mddev->metadata_type[--namelen] = 0;
4770 mddev->persistent = 0;
4771 mddev->external = 1;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004772 mddev->major_version = 0;
4773 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004774 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004775 }
4776 major = simple_strtoul(buf, &e, 10);
NeilBrown67918752014-12-15 12:57:01 +11004777 err = -EINVAL;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004778 if (e==buf || *e != '.')
NeilBrown67918752014-12-15 12:57:01 +11004779 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004780 buf = e+1;
4781 minor = simple_strtoul(buf, &e, 10);
NeilBrown3f9d7b02006-12-22 01:11:41 -08004782 if (e==buf || (*e && *e != '\n') )
NeilBrown67918752014-12-15 12:57:01 +11004783 goto out_unlock;
4784 err = -ENOENT;
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07004785 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004786 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004787 mddev->major_version = major;
4788 mddev->minor_version = minor;
4789 mddev->persistent = 1;
NeilBrowne6910632008-02-06 01:39:51 -08004790 mddev->external = 0;
NeilBrown67918752014-12-15 12:57:01 +11004791 err = 0;
4792out_unlock:
4793 mddev_unlock(mddev);
4794 return err ?: len;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004795}
4796
4797static struct md_sysfs_entry md_metadata =
NeilBrown750f1992014-09-30 08:53:05 +10004798__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
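/*
 * Accepted values for metadata_version, per the parsing above:
 *	"none"            - no persistent superblock
 *	"external:<type>" - metadata managed entirely in user space
 *	"<N>.<M>"         - an internally known superblock format, e.g. "1.2"
 */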
NeilBrown8bb93aa2006-01-06 00:20:50 -08004799
NeilBrowna35b0d62006-01-06 00:20:49 -08004800static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004801action_show(struct mddev *mddev, char *page)
NeilBrown24dd4692005-11-08 21:39:26 -08004802{
NeilBrown7eec3142005-11-08 21:39:44 -08004803 char *type = "idle";
NeilBrownb7b17c92014-12-15 12:56:59 +11004804 unsigned long recovery = mddev->recovery;
4805 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
NeilBrownb6a9ce62009-05-26 09:41:17 +10004806 type = "frozen";
NeilBrownb7b17c92014-12-15 12:56:59 +11004807 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4808 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4809 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
NeilBrownccfcc3c2006-03-27 01:18:09 -08004810 type = "reshape";
NeilBrownb7b17c92014-12-15 12:56:59 +11004811 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4812 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004813 type = "resync";
NeilBrownb7b17c92014-12-15 12:56:59 +11004814 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004815 type = "check";
4816 else
4817 type = "repair";
NeilBrownb7b17c92014-12-15 12:56:59 +11004818 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004819 type = "recover";
NeilBrown985ca972015-07-06 12:26:57 +10004820 else if (mddev->reshape_position != MaxSector)
4821 type = "reshape";
NeilBrown24dd4692005-11-08 21:39:26 -08004822 }
4823 return sprintf(page, "%s\n", type);
4824}
4825
4826static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004827action_store(struct mddev *mddev, const char *page, size_t len)
NeilBrown24dd4692005-11-08 21:39:26 -08004828{
NeilBrown7eec3142005-11-08 21:39:44 -08004829 if (!mddev->pers || !mddev->pers->sync_request)
4830 return -EINVAL;
4831
NeilBrownb6a9ce62009-05-26 09:41:17 +10004832
4833 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004834 if (cmd_match(page, "frozen"))
4835 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4836 else
4837 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown8e8e2512015-06-12 19:51:27 +10004838 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4839 mddev_lock(mddev) == 0) {
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004840 if (work_pending(&mddev->del_work))
4841 flush_workqueue(md_misc_wq);
NeilBrown8e8e2512015-06-12 19:51:27 +10004842 if (mddev->sync_thread) {
4843 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown67918752014-12-15 12:57:01 +11004844 md_reap_sync_thread(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004845 }
NeilBrown8e8e2512015-06-12 19:51:27 +10004846 mddev_unlock(mddev);
NeilBrown7eec3142005-11-08 21:39:44 -08004847 }
NeilBrown312045e2015-12-21 11:01:21 +11004848 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004849 return -EBUSY;
Neil Brown72a23c22008-06-28 08:31:41 +10004850 else if (cmd_match(page, "resync"))
NeilBrown56ccc112015-05-28 17:53:29 +10004851 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004852 else if (cmd_match(page, "recover")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004853 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004854 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004855 } else if (cmd_match(page, "reshape")) {
NeilBrown16484bf2006-03-27 01:18:13 -08004856 int err;
4857 if (mddev->pers->start_reshape == NULL)
4858 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11004859 err = mddev_lock(mddev);
4860 if (!err) {
NeilBrown312045e2015-12-21 11:01:21 +11004861 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4862 err = -EBUSY;
4863 else {
4864 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4865 err = mddev->pers->start_reshape(mddev);
4866 }
NeilBrown67918752014-12-15 12:57:01 +11004867 mddev_unlock(mddev);
4868 }
NeilBrown16484bf2006-03-27 01:18:13 -08004869 if (err)
4870 return err;
Junxiao Bie1a86db2020-07-14 16:10:26 -07004871 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrown16484bf2006-03-27 01:18:13 -08004872 } else {
NeilBrownbce74da2006-01-06 00:20:41 -08004873 if (cmd_match(page, "check"))
NeilBrown7eec3142005-11-08 21:39:44 -08004874 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
NeilBrown2adc7d42006-05-20 14:59:57 -07004875 else if (!cmd_match(page, "repair"))
NeilBrown7eec3142005-11-08 21:39:44 -08004876 return -EINVAL;
NeilBrown56ccc112015-05-28 17:53:29 +10004877 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004878 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4879 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004880 }
NeilBrown48c26dd2012-10-11 14:19:39 +11004881 if (mddev->ro == 2) {
4882 /* A write to sync_action is enough to justify
4883 * canceling read-auto mode
4884 */
4885 mddev->ro = 0;
4886 md_wakeup_thread(mddev->sync_thread);
4887 }
NeilBrown03c902e2006-01-06 00:20:46 -08004888 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08004889 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10004890 sysfs_notify_dirent_safe(mddev->sysfs_action);
NeilBrown24dd4692005-11-08 21:39:26 -08004891 return len;
4892}
4893
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004894static struct md_sysfs_entry md_scan_mode =
NeilBrown750f1992014-09-30 08:53:05 +10004895__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
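/*
 * Writes accepted by sync_action, per action_store() above: "idle",
 * "frozen", "resync", "recover", "reshape", "check" and "repair".
 * Both "check" and "repair" request a user-initiated sync pass; "check"
 * only counts mismatches while "repair" also corrects them.
 */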
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004896
4897static ssize_t
4898last_sync_action_show(struct mddev *mddev, char *page)
4899{
4900 return sprintf(page, "%s\n", mddev->last_sync_action);
4901}
4902
4903static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4904
NeilBrown9d888832005-11-08 21:39:26 -08004905static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004906mismatch_cnt_show(struct mddev *mddev, char *page)
NeilBrown9d888832005-11-08 21:39:26 -08004907{
4908 return sprintf(page, "%llu\n",
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11004909 (unsigned long long)
4910 atomic64_read(&mddev->resync_mismatches));
NeilBrown9d888832005-11-08 21:39:26 -08004911}
4912
NeilBrown80ca3a42006-07-10 04:44:18 -07004913static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
NeilBrown9d888832005-11-08 21:39:26 -08004914
NeilBrown88202a02006-01-06 00:21:36 -08004915static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004916sync_min_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004917{
4918 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4919 mddev->sync_speed_min ? "local": "system");
4920}
4921
4922static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004923sync_min_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004924{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004925 unsigned int min;
4926 int rv;
4927
NeilBrown88202a02006-01-06 00:21:36 -08004928 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004929 min = 0;
4930 } else {
4931 rv = kstrtouint(buf, 10, &min);
4932 if (rv < 0)
4933 return rv;
4934 if (min == 0)
4935 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004936 }
NeilBrown88202a02006-01-06 00:21:36 -08004937 mddev->sync_speed_min = min;
4938 return len;
4939}
4940
4941static struct md_sysfs_entry md_sync_min =
4942__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4943
4944static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004945sync_max_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004946{
4947 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4948 mddev->sync_speed_max ? "local": "system");
4949}
4950
4951static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004952sync_max_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004953{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004954 unsigned int max;
4955 int rv;
4956
NeilBrown88202a02006-01-06 00:21:36 -08004957 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004958 max = 0;
4959 } else {
4960 rv = kstrtouint(buf, 10, &max);
4961 if (rv < 0)
4962 return rv;
4963 if (max == 0)
4964 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004965 }
NeilBrown88202a02006-01-06 00:21:36 -08004966 mddev->sync_speed_max = max;
4967 return len;
4968}
4969
4970static struct md_sysfs_entry md_sync_max =
4971__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
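/*
 * Note: sync_speed_min and sync_speed_max set per-array resync speed
 * limits in KB/sec; writing "system" clears the per-array value so the
 * system-wide speed_limit_min/speed_limit_max defaults apply again.
 */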
4972
Iustin Popd7f3d292007-10-16 23:30:54 -07004973static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004974degraded_show(struct mddev *mddev, char *page)
Iustin Popd7f3d292007-10-16 23:30:54 -07004975{
4976 return sprintf(page, "%d\n", mddev->degraded);
4977}
4978static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
NeilBrown88202a02006-01-06 00:21:36 -08004979
4980static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004981sync_force_parallel_show(struct mddev *mddev, char *page)
Bernd Schubert90b08712008-05-23 13:04:38 -07004982{
4983 return sprintf(page, "%d\n", mddev->parallel_resync);
4984}
4985
4986static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004987sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
Bernd Schubert90b08712008-05-23 13:04:38 -07004988{
4989 long n;
4990
Jingoo Hanb29bebd2013-06-01 16:15:16 +09004991 if (kstrtol(buf, 10, &n))
Bernd Schubert90b08712008-05-23 13:04:38 -07004992 return -EINVAL;
4993
4994 if (n != 0 && n != 1)
4995 return -EINVAL;
4996
4997 mddev->parallel_resync = n;
4998
4999 if (mddev->sync_thread)
5000 wake_up(&resync_wait);
5001
5002 return len;
5003}
5004
5005/* force parallel resync, even with shared block devices */
5006static struct md_sysfs_entry md_sync_force_parallel =
5007__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5008 sync_force_parallel_show, sync_force_parallel_store);
5009
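/*
 * sync_speed reports the recent resync rate: sectors completed since the
 * last mark (resync - resync_mark_cnt) divided by the elapsed seconds,
 * then halved to convert 512-byte sectors into KiB/sec.
 */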
5010static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005011sync_speed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005012{
5013 unsigned long resync, dt, db;
NeilBrownd1a7c502009-03-31 15:24:32 +11005014 if (mddev->curr_resync == 0)
5015 return sprintf(page, "none\n");
Andre Noll9687a602008-03-25 22:24:09 +01005016 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5017 dt = (jiffies - mddev->resync_mark) / HZ;
NeilBrown88202a02006-01-06 00:21:36 -08005018 if (!dt) dt++;
Andre Noll9687a602008-03-25 22:24:09 +01005019 db = resync - mddev->resync_mark_cnt;
5020 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
NeilBrown88202a02006-01-06 00:21:36 -08005021}
5022
NeilBrown80ca3a42006-07-10 04:44:18 -07005023static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
NeilBrown88202a02006-01-06 00:21:36 -08005024
5025static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005026sync_completed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005027{
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005028 unsigned long long max_sectors, resync;
NeilBrown88202a02006-01-06 00:21:36 -08005029
NeilBrownacb180b2009-04-14 16:28:34 +10005030 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5031 return sprintf(page, "none\n");
5032
NeilBrown72f36d52012-10-11 14:25:57 +11005033 if (mddev->curr_resync == 1 ||
5034 mddev->curr_resync == 2)
5035 return sprintf(page, "delayed\n");
5036
NeilBrownc804cde2012-05-21 09:28:33 +10005037 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5038 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Andre Noll58c0fed2009-03-31 14:33:13 +11005039 max_sectors = mddev->resync_max_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005040 else
Andre Noll58c0fed2009-03-31 14:33:13 +11005041 max_sectors = mddev->dev_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005042
NeilBrownacb180b2009-04-14 16:28:34 +10005043 resync = mddev->curr_resync_completed;
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005044 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
NeilBrown88202a02006-01-06 00:21:36 -08005045}
5046
NeilBrown750f1992014-09-30 08:53:05 +10005047static struct md_sysfs_entry md_sync_completed =
5048 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
NeilBrown88202a02006-01-06 00:21:36 -08005049
NeilBrowne464eaf2006-03-27 01:18:14 -08005050static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005051min_sync_show(struct mddev *mddev, char *page)
Neil Brown5e96ee62008-06-28 08:31:24 +10005052{
5053 return sprintf(page, "%llu\n",
5054 (unsigned long long)mddev->resync_min);
5055}
5056static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005057min_sync_store(struct mddev *mddev, const char *buf, size_t len)
Neil Brown5e96ee62008-06-28 08:31:24 +10005058{
5059 unsigned long long min;
NeilBrown23da4222014-12-15 12:57:01 +11005060 int err;
NeilBrown23da4222014-12-15 12:57:01 +11005061
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005062 if (kstrtoull(buf, 10, &min))
Neil Brown5e96ee62008-06-28 08:31:24 +10005063 return -EINVAL;
NeilBrown23da4222014-12-15 12:57:01 +11005064
5065 spin_lock(&mddev->lock);
5066 err = -EINVAL;
Neil Brown5e96ee62008-06-28 08:31:24 +10005067 if (min > mddev->resync_max)
NeilBrown23da4222014-12-15 12:57:01 +11005068 goto out_unlock;
5069
5070 err = -EBUSY;
Neil Brown5e96ee62008-06-28 08:31:24 +10005071 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005072 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005073
NeilBrown50c37b12015-03-23 17:36:38 +11005074	/* Round down to a multiple of 4K (8 sectors) for safety */
5075 mddev->resync_min = round_down(min, 8);
NeilBrown23da4222014-12-15 12:57:01 +11005076 err = 0;
Neil Brown5e96ee62008-06-28 08:31:24 +10005077
NeilBrown23da4222014-12-15 12:57:01 +11005078out_unlock:
5079 spin_unlock(&mddev->lock);
5080 return err ?: len;
Neil Brown5e96ee62008-06-28 08:31:24 +10005081}
5082
5083static struct md_sysfs_entry md_min_sync =
5084__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5085
5086static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005087max_sync_show(struct mddev *mddev, char *page)
NeilBrownc6207272008-02-06 01:39:52 -08005088{
5089 if (mddev->resync_max == MaxSector)
5090 return sprintf(page, "max\n");
5091 else
5092 return sprintf(page, "%llu\n",
5093 (unsigned long long)mddev->resync_max);
5094}
5095static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005096max_sync_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownc6207272008-02-06 01:39:52 -08005097{
NeilBrown23da4222014-12-15 12:57:01 +11005098 int err;
5099 spin_lock(&mddev->lock);
NeilBrownc6207272008-02-06 01:39:52 -08005100 if (strncmp(buf, "max", 3) == 0)
5101 mddev->resync_max = MaxSector;
5102 else {
Neil Brown5e96ee62008-06-28 08:31:24 +10005103 unsigned long long max;
NeilBrown23da4222014-12-15 12:57:01 +11005104 int chunk;
5105
5106 err = -EINVAL;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005107 if (kstrtoull(buf, 10, &max))
NeilBrown23da4222014-12-15 12:57:01 +11005108 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005109 if (max < mddev->resync_min)
NeilBrown23da4222014-12-15 12:57:01 +11005110 goto out_unlock;
5111
5112 err = -EBUSY;
NeilBrownc6207272008-02-06 01:39:52 -08005113 if (max < mddev->resync_max &&
NeilBrown4d484a42009-08-13 10:41:50 +10005114 mddev->ro == 0 &&
NeilBrownc6207272008-02-06 01:39:52 -08005115 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005116 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005117
5118 /* Must be a multiple of chunk_size */
NeilBrown23da4222014-12-15 12:57:01 +11005119 chunk = mddev->chunk_sectors;
5120 if (chunk) {
raz ben yehuda2ac06c32009-06-16 17:01:42 +10005121 sector_t temp = max;
NeilBrown23da4222014-12-15 12:57:01 +11005122
5123 err = -EINVAL;
5124 if (sector_div(temp, chunk))
5125 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005126 }
5127 mddev->resync_max = max;
5128 }
5129 wake_up(&mddev->recovery_wait);
NeilBrown23da4222014-12-15 12:57:01 +11005130 err = 0;
5131out_unlock:
5132 spin_unlock(&mddev->lock);
5133 return err ?: len;
NeilBrownc6207272008-02-06 01:39:52 -08005134}
5135
5136static struct md_sysfs_entry md_max_sync =
5137__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
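/*
 * Note: sync_min and sync_max bound the resync window in sectors.
 * sync_min is rounded down to an 8-sector (4K) multiple and cannot be
 * raised while a resync is running; sync_max is either "max" or a value
 * that is a multiple of the chunk size.
 */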
5138
5139static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005140suspend_lo_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005141{
5142 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5143}
5144
5145static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005146suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005147{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005148 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005149 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005150
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005151 err = kstrtoull(buf, 10, &new);
5152 if (err < 0)
5153 return err;
5154 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005155 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005156
NeilBrown67918752014-12-15 12:57:01 +11005157 err = mddev_lock(mddev);
5158 if (err)
5159 return err;
5160 err = -EINVAL;
5161 if (mddev->pers == NULL ||
5162 mddev->pers->quiesce == NULL)
5163 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005164 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005165 mddev->suspend_lo = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005166 mddev_resume(mddev);
5167
NeilBrown67918752014-12-15 12:57:01 +11005168 err = 0;
5169unlock:
5170 mddev_unlock(mddev);
5171 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005172}
5173static struct md_sysfs_entry md_suspend_lo =
5174__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5175
NeilBrowne464eaf2006-03-27 01:18:14 -08005176static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005177suspend_hi_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005178{
5179 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5180}
5181
5182static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005183suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005184{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005185 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005186 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005187
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005188 err = kstrtoull(buf, 10, &new);
5189 if (err < 0)
5190 return err;
5191 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005192 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005193
NeilBrown67918752014-12-15 12:57:01 +11005194 err = mddev_lock(mddev);
5195 if (err)
5196 return err;
5197 err = -EINVAL;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005198 if (mddev->pers == NULL)
NeilBrown67918752014-12-15 12:57:01 +11005199 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005200
5201 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005202 mddev->suspend_hi = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005203 mddev_resume(mddev);
5204
NeilBrown67918752014-12-15 12:57:01 +11005205 err = 0;
5206unlock:
5207 mddev_unlock(mddev);
5208 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005209}
5210static struct md_sysfs_entry md_suspend_hi =
5211__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5212
NeilBrown08a02ec2007-05-09 02:35:38 -07005213static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005214reshape_position_show(struct mddev *mddev, char *page)
NeilBrown08a02ec2007-05-09 02:35:38 -07005215{
5216 if (mddev->reshape_position != MaxSector)
5217 return sprintf(page, "%llu\n",
5218 (unsigned long long)mddev->reshape_position);
5219 strcpy(page, "none\n");
5220 return 5;
5221}
5222
5223static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005224reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown08a02ec2007-05-09 02:35:38 -07005225{
NeilBrownc6563a82012-05-21 09:27:00 +10005226 struct md_rdev *rdev;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005227 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005228 int err;
NeilBrown67918752014-12-15 12:57:01 +11005229
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005230 err = kstrtoull(buf, 10, &new);
5231 if (err < 0)
5232 return err;
5233 if (new != (sector_t)new)
NeilBrown08a02ec2007-05-09 02:35:38 -07005234 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11005235 err = mddev_lock(mddev);
5236 if (err)
5237 return err;
5238 err = -EBUSY;
5239 if (mddev->pers)
5240 goto unlock;
NeilBrown08a02ec2007-05-09 02:35:38 -07005241 mddev->reshape_position = new;
5242 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10005243 mddev->reshape_backwards = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07005244 mddev->new_level = mddev->level;
5245 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10005246 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10005247 rdev_for_each(rdev, mddev)
5248 rdev->new_data_offset = rdev->data_offset;
NeilBrown67918752014-12-15 12:57:01 +11005249 err = 0;
5250unlock:
5251 mddev_unlock(mddev);
5252 return err ?: len;
NeilBrown08a02ec2007-05-09 02:35:38 -07005253}
5254
5255static struct md_sysfs_entry md_reshape_position =
5256__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5257 reshape_position_store);
5258
Dan Williamsb522adc2009-03-31 15:00:31 +11005259static ssize_t
NeilBrown2c810cd2012-05-21 09:27:00 +10005260reshape_direction_show(struct mddev *mddev, char *page)
5261{
5262 return sprintf(page, "%s\n",
5263 mddev->reshape_backwards ? "backwards" : "forwards");
5264}
5265
5266static ssize_t
5267reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5268{
5269 int backwards = 0;
NeilBrown67918752014-12-15 12:57:01 +11005270 int err;
5271
NeilBrown2c810cd2012-05-21 09:27:00 +10005272 if (cmd_match(buf, "forwards"))
5273 backwards = 0;
5274 else if (cmd_match(buf, "backwards"))
5275 backwards = 1;
5276 else
5277 return -EINVAL;
5278 if (mddev->reshape_backwards == backwards)
5279 return len;
5280
NeilBrown67918752014-12-15 12:57:01 +11005281 err = mddev_lock(mddev);
5282 if (err)
5283 return err;
NeilBrown2c810cd2012-05-21 09:27:00 +10005284 /* check if we are allowed to change */
5285 if (mddev->delta_disks)
NeilBrown67918752014-12-15 12:57:01 +11005286 err = -EBUSY;
5287 else if (mddev->persistent &&
NeilBrown2c810cd2012-05-21 09:27:00 +10005288 mddev->major_version == 0)
NeilBrown67918752014-12-15 12:57:01 +11005289 err = -EINVAL;
5290 else
5291 mddev->reshape_backwards = backwards;
5292 mddev_unlock(mddev);
5293 return err ?: len;
NeilBrown2c810cd2012-05-21 09:27:00 +10005294}
5295
5296static struct md_sysfs_entry md_reshape_direction =
5297__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5298 reshape_direction_store);
5299
5300static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005301array_size_show(struct mddev *mddev, char *page)
Dan Williamsb522adc2009-03-31 15:00:31 +11005302{
5303 if (mddev->external_size)
5304 return sprintf(page, "%llu\n",
5305 (unsigned long long)mddev->array_sectors/2);
5306 else
5307 return sprintf(page, "default\n");
5308}
5309
5310static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005311array_size_store(struct mddev *mddev, const char *buf, size_t len)
Dan Williamsb522adc2009-03-31 15:00:31 +11005312{
5313 sector_t sectors;
NeilBrown67918752014-12-15 12:57:01 +11005314 int err;
5315
5316 err = mddev_lock(mddev);
5317 if (err)
5318 return err;
Dan Williamsb522adc2009-03-31 15:00:31 +11005319
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005320 /* cluster raid doesn't support change array_sectors */
Zhilong Liub6708832017-04-10 14:15:55 +08005321 if (mddev_is_clustered(mddev)) {
5322 mddev_unlock(mddev);
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005323 return -EINVAL;
Zhilong Liub6708832017-04-10 14:15:55 +08005324 }
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005325
Dan Williamsb522adc2009-03-31 15:00:31 +11005326 if (strncmp(buf, "default", 7) == 0) {
5327 if (mddev->pers)
5328 sectors = mddev->pers->size(mddev, 0, 0);
5329 else
5330 sectors = mddev->array_sectors;
5331
5332 mddev->external_size = 0;
5333 } else {
5334 if (strict_blocks_to_sectors(buf, &sectors) < 0)
NeilBrown67918752014-12-15 12:57:01 +11005335 err = -EINVAL;
5336 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5337 err = -E2BIG;
5338 else
5339 mddev->external_size = 1;
Dan Williamsb522adc2009-03-31 15:00:31 +11005340 }
5341
NeilBrown67918752014-12-15 12:57:01 +11005342 if (!err) {
5343 mddev->array_sectors = sectors;
Christoph Hellwig2c247c52020-11-16 15:57:11 +01005344 if (mddev->pers)
5345 set_capacity_and_notify(mddev->gendisk,
5346 mddev->array_sectors);
NeilBrowncbe6ef12011-02-16 13:58:38 +11005347 }
NeilBrown67918752014-12-15 12:57:01 +11005348 mddev_unlock(mddev);
5349 return err ?: len;
Dan Williamsb522adc2009-03-31 15:00:31 +11005350}
5351
5352static struct md_sysfs_entry md_array_size =
5353__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5354 array_size_store);
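/*
 * Note: array_size accepts either "default" (size determined by the
 * personality) or an explicit size in KiB; an explicit value marks the
 * array size as externally managed (external_size) and sets the exported
 * capacity accordingly.
 */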
NeilBrowne464eaf2006-03-27 01:18:14 -08005355
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005356static ssize_t
5357consistency_policy_show(struct mddev *mddev, char *page)
5358{
5359 int ret;
5360
5361 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5362 ret = sprintf(page, "journal\n");
5363 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5364 ret = sprintf(page, "ppl\n");
5365 } else if (mddev->bitmap) {
5366 ret = sprintf(page, "bitmap\n");
5367 } else if (mddev->pers) {
5368 if (mddev->pers->sync_request)
5369 ret = sprintf(page, "resync\n");
5370 else
5371 ret = sprintf(page, "none\n");
5372 } else {
5373 ret = sprintf(page, "unknown\n");
5374 }
5375
5376 return ret;
5377}
5378
5379static ssize_t
5380consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5381{
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005382 int err = 0;
5383
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005384 if (mddev->pers) {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005385 if (mddev->pers->change_consistency_policy)
5386 err = mddev->pers->change_consistency_policy(mddev, buf);
5387 else
5388 err = -EBUSY;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005389 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5390 set_bit(MD_HAS_PPL, &mddev->flags);
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005391 } else {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005392 err = -EINVAL;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005393 }
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005394
5395 return err ? err : len;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005396}
5397
5398static struct md_sysfs_entry md_consistency_policy =
5399__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5400 consistency_policy_store);
5401
Guoqing Jiang9a567842019-07-24 11:09:19 +02005402static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5403{
5404 return sprintf(page, "%d\n", mddev->fail_last_dev);
5405}
5406
5407/*
5408 * Setting fail_last_dev to true allows the last device to be forcibly removed
5409 * from RAID1/RAID10.
5410 */
5411static ssize_t
5412fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5413{
5414 int ret;
5415 bool value;
5416
5417 ret = kstrtobool(buf, &value);
5418 if (ret)
5419 return ret;
5420
5421 if (value != mddev->fail_last_dev)
5422 mddev->fail_last_dev = value;
5423
5424 return len;
5425}
5426static struct md_sysfs_entry md_fail_last_dev =
5427__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5428 fail_last_dev_store);
5429
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005430static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5431{
5432 if (mddev->pers == NULL || (mddev->pers->level != 1))
5433 return sprintf(page, "n/a\n");
5434 else
5435 return sprintf(page, "%d\n", mddev->serialize_policy);
5436}
5437
5438/*
5439 * Setting serialize_policy to true enforces that write IO is not reordered
5440 * for raid1.
5441 */
5442static ssize_t
5443serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5444{
5445 int err;
5446 bool value;
5447
5448 err = kstrtobool(buf, &value);
5449 if (err)
5450 return err;
5451
5452 if (value == mddev->serialize_policy)
5453 return len;
5454
5455 err = mddev_lock(mddev);
5456 if (err)
5457 return err;
5458 if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5459 pr_err("md: serialize_policy is only effective for raid1\n");
5460 err = -EINVAL;
5461 goto unlock;
5462 }
5463
5464 mddev_suspend(mddev);
5465 if (value)
5466 mddev_create_serial_pool(mddev, NULL, true);
5467 else
5468 mddev_destroy_serial_pool(mddev, NULL, true);
5469 mddev->serialize_policy = value;
5470 mddev_resume(mddev);
5471unlock:
5472 mddev_unlock(mddev);
5473 return err ?: len;
5474}
5475
5476static struct md_sysfs_entry md_serialize_policy =
5477__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5478 serialize_policy_store);
5479
5480
NeilBrowneae17012005-11-08 21:39:23 -08005481static struct attribute *md_default_attrs[] = {
5482 &md_level.attr,
NeilBrownd4dbd022006-06-26 00:27:59 -07005483 &md_layout.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005484 &md_raid_disks.attr,
Sebastian Parschauerec164d072020-07-28 12:01:39 +02005485 &md_uuid.attr,
NeilBrown3b343802006-01-06 00:20:47 -08005486 &md_chunk_size.attr,
NeilBrowna35b0d62006-01-06 00:20:49 -08005487 &md_size.attr,
NeilBrowna94213b2006-06-26 00:28:00 -07005488 &md_resync_start.attr,
NeilBrown8bb93aa2006-01-06 00:20:50 -08005489 &md_metadata.attr,
NeilBrown6d7ff7382006-01-06 00:21:16 -08005490 &md_new_device.attr,
NeilBrown16f17b32006-06-26 00:27:37 -07005491 &md_safe_delay.attr,
NeilBrown9e653b62006-06-26 00:27:58 -07005492 &md_array_state.attr,
NeilBrown08a02ec2007-05-09 02:35:38 -07005493 &md_reshape_position.attr,
NeilBrown2c810cd2012-05-21 09:27:00 +10005494 &md_reshape_direction.attr,
Dan Williamsb522adc2009-03-31 15:00:31 +11005495 &md_array_size.attr,
Robert Becker1e509152009-12-14 12:49:58 +11005496 &max_corr_read_errors.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005497 &md_consistency_policy.attr,
Guoqing Jiang9a567842019-07-24 11:09:19 +02005498 &md_fail_last_dev.attr,
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005499 &md_serialize_policy.attr,
NeilBrown411036f2005-11-08 21:39:40 -08005500 NULL,
5501};
5502
5503static struct attribute *md_redundancy_attrs[] = {
NeilBrown24dd4692005-11-08 21:39:26 -08005504 &md_scan_mode.attr,
Jonathan Brassowc4a39552013-06-25 01:23:59 -05005505 &md_last_scan_mode.attr,
NeilBrown9d888832005-11-08 21:39:26 -08005506 &md_mismatches.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005507 &md_sync_min.attr,
5508 &md_sync_max.attr,
5509 &md_sync_speed.attr,
Bernd Schubert90b08712008-05-23 13:04:38 -07005510 &md_sync_force_parallel.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005511 &md_sync_completed.attr,
Neil Brown5e96ee62008-06-28 08:31:24 +10005512 &md_min_sync.attr,
NeilBrownc6207272008-02-06 01:39:52 -08005513 &md_max_sync.attr,
NeilBrowne464eaf2006-03-27 01:18:14 -08005514 &md_suspend_lo.attr,
5515 &md_suspend_hi.attr,
Paul Clements9b1d1da2006-10-03 01:15:49 -07005516 &md_bitmap.attr,
Iustin Popd7f3d292007-10-16 23:30:54 -07005517 &md_degraded.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005518 NULL,
5519};
NeilBrown411036f2005-11-08 21:39:40 -08005520static struct attribute_group md_redundancy_group = {
5521 .name = NULL,
5522 .attrs = md_redundancy_attrs,
5523};
5524
NeilBrowneae17012005-11-08 21:39:23 -08005525static ssize_t
5526md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5527{
5528 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005529 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005530 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005531
5532 if (!entry->show)
5533 return -EIO;
NeilBrownaf8a2432011-12-08 15:49:46 +11005534 spin_lock(&all_mddevs_lock);
5535 if (list_empty(&mddev->all_mddevs)) {
5536 spin_unlock(&all_mddevs_lock);
5537 return -EBUSY;
5538 }
5539 mddev_get(mddev);
5540 spin_unlock(&all_mddevs_lock);
5541
NeilBrownb7b17c92014-12-15 12:56:59 +11005542 rv = entry->show(mddev, page);
NeilBrownaf8a2432011-12-08 15:49:46 +11005543 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005544 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005545}
5546
5547static ssize_t
5548md_attr_store(struct kobject *kobj, struct attribute *attr,
5549 const char *page, size_t length)
5550{
5551 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005552 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005553 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005554
5555 if (!entry->store)
5556 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07005557 if (!capable(CAP_SYS_ADMIN))
5558 return -EACCES;
NeilBrownaf8a2432011-12-08 15:49:46 +11005559 spin_lock(&all_mddevs_lock);
5560 if (list_empty(&mddev->all_mddevs)) {
5561 spin_unlock(&all_mddevs_lock);
5562 return -EBUSY;
5563 }
5564 mddev_get(mddev);
5565 spin_unlock(&all_mddevs_lock);
NeilBrown67918752014-12-15 12:57:01 +11005566 rv = entry->store(mddev, page, length);
NeilBrownaf8a2432011-12-08 15:49:46 +11005567 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005568 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005569}
5570
5571static void md_free(struct kobject *ko)
5572{
NeilBrownfd01b882011-10-11 16:47:53 +11005573 struct mddev *mddev = container_of(ko, struct mddev, kobj);
NeilBrowna21d1502009-01-09 08:31:09 +11005574
5575 if (mddev->sysfs_state)
5576 sysfs_put(mddev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07005577 if (mddev->sysfs_level)
5578 sysfs_put(mddev->sysfs_level);
5579
Bart Van Assched8115c352018-02-28 10:15:29 -08005580 if (mddev->gendisk)
5581 del_gendisk(mddev->gendisk);
NeilBrown6cd18e72015-04-27 14:12:22 +10005582 if (mddev->queue)
5583 blk_cleanup_queue(mddev->queue);
Bart Van Assched8115c352018-02-28 10:15:29 -08005584 if (mddev->gendisk)
NeilBrowna21d1502009-01-09 08:31:09 +11005585 put_disk(mddev->gendisk);
NeilBrown4ad23a972017-03-15 14:05:14 +11005586 percpu_ref_exit(&mddev->writes_pending);
NeilBrowna21d1502009-01-09 08:31:09 +11005587
Kent Overstreet28dec872018-06-07 20:52:54 -04005588 bioset_exit(&mddev->bio_set);
5589 bioset_exit(&mddev->sync_set);
Artur Paszkiewicz41d2d842020-07-03 11:13:09 +02005590 mempool_exit(&mddev->md_io_pool);
NeilBrowneae17012005-11-08 21:39:23 -08005591 kfree(mddev);
5592}
5593
Emese Revfy52cf25d2010-01-19 02:58:23 +01005594static const struct sysfs_ops md_sysfs_ops = {
NeilBrowneae17012005-11-08 21:39:23 -08005595 .show = md_attr_show,
5596 .store = md_attr_store,
5597};
5598static struct kobj_type md_ktype = {
5599 .release = md_free,
5600 .sysfs_ops = &md_sysfs_ops,
5601 .default_attrs = md_default_attrs,
5602};
5603
Linus Torvalds1da177e2005-04-16 15:20:36 -07005604int mdp_major = 0;
5605
Dan Williams5fd3a172009-03-04 00:57:25 -07005606static void mddev_delayed_delete(struct work_struct *ws)
5607{
NeilBrownfd01b882011-10-11 16:47:53 +11005608 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Dan Williams5fd3a172009-03-04 00:57:25 -07005609
NeilBrown43a70502009-12-14 12:49:55 +11005610 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
Dan Williams5fd3a172009-03-04 00:57:25 -07005611 kobject_del(&mddev->kobj);
5612 kobject_put(&mddev->kobj);
5613}
5614
NeilBrown4ad23a972017-03-15 14:05:14 +11005615static void no_op(struct percpu_ref *r) {}
5616
NeilBrowna415c0f2017-06-05 16:05:13 +10005617int mddev_init_writes_pending(struct mddev *mddev)
5618{
5619 if (mddev->writes_pending.percpu_count_ptr)
5620 return 0;
Roman Gushchinddde2af2019-05-07 10:01:49 -07005621 if (percpu_ref_init(&mddev->writes_pending, no_op,
5622 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
NeilBrowna415c0f2017-06-05 16:05:13 +10005623 return -ENOMEM;
5624 /* We want to start with the refcount at zero */
5625 percpu_ref_put(&mddev->writes_pending);
5626 return 0;
5627}
5628EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5629
NeilBrownefeb53c2009-01-09 08:31:10 +11005630static int md_alloc(dev_t dev, char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631{
NeilBrown039b7222017-04-12 16:26:13 +10005632 /*
5633 * If dev is zero, name is the name of a device to allocate with
5634 * an arbitrary minor number. It will be "md_???"
5635 * If dev is non-zero it must be a device number with a MAJOR of
5636 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5637 * the device is being created by opening a node in /dev.
5638 * If "name" is not NULL, the device is being created by
5639 * writing to /sys/module/md_mod/parameters/new_array.
5640 */
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005641 static DEFINE_MUTEX(disks_mutex);
NeilBrownfd01b882011-10-11 16:47:53 +11005642 struct mddev *mddev = mddev_find(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643 struct gendisk *disk;
NeilBrownefeb53c2009-01-09 08:31:10 +11005644 int partitioned;
5645 int shift;
5646 int unit;
Greg Kroah-Hartman3830c622007-12-17 15:54:39 -04005647 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005648
5649 if (!mddev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005650 return -ENODEV;
5651
5652 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5653 shift = partitioned ? MdpMinorShift : 0;
5654 unit = MINOR(mddev->unit) >> shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655
Tejun Heoe804ac72010-10-15 15:36:08 +02005656 /* wait for any previous instance of this device to be
5657 * completely removed (mddev_delayed_delete).
NeilBrownd3374822009-01-09 08:31:10 +11005658 */
Tejun Heoe804ac72010-10-15 15:36:08 +02005659 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11005660
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005661 mutex_lock(&disks_mutex);
NeilBrown0909dc42009-07-01 12:27:21 +10005662 error = -EEXIST;
5663 if (mddev->gendisk)
5664 goto abort;
NeilBrownefeb53c2009-01-09 08:31:10 +11005665
NeilBrown039b7222017-04-12 16:26:13 +10005666 if (name && !dev) {
NeilBrownefeb53c2009-01-09 08:31:10 +11005667 /* Need to ensure that 'name' is not a duplicate.
5668 */
NeilBrownfd01b882011-10-11 16:47:53 +11005669 struct mddev *mddev2;
NeilBrownefeb53c2009-01-09 08:31:10 +11005670 spin_lock(&all_mddevs_lock);
5671
5672 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5673 if (mddev2->gendisk &&
5674 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5675 spin_unlock(&all_mddevs_lock);
NeilBrown0909dc42009-07-01 12:27:21 +10005676 goto abort;
NeilBrownefeb53c2009-01-09 08:31:10 +11005677 }
5678 spin_unlock(&all_mddevs_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005679 }
NeilBrown039b7222017-04-12 16:26:13 +10005680 if (name && dev)
5681 /*
5682 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
5683 */
5684 mddev->hold_active = UNTIL_STOP;
NeilBrown8b765392009-01-09 08:31:08 +11005685
Artur Paszkiewicz41d2d842020-07-03 11:13:09 +02005686 error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
5687 sizeof(struct md_io));
5688 if (error)
5689 goto abort;
5690
NeilBrown0909dc42009-07-01 12:27:21 +10005691 error = -ENOMEM;
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02005692 mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
NeilBrown0909dc42009-07-01 12:27:21 +10005693 if (!mddev->queue)
5694 goto abort;
NeilBrown409c57f2009-03-31 14:39:39 +11005695
Martin K. Petersenb1bd0552012-01-11 16:27:11 +01005696 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown8b765392009-01-09 08:31:08 +11005697
Linus Torvalds1da177e2005-04-16 15:20:36 -07005698 disk = alloc_disk(1 << shift);
5699 if (!disk) {
NeilBrown8b765392009-01-09 08:31:08 +11005700 blk_cleanup_queue(mddev->queue);
5701 mddev->queue = NULL;
NeilBrown0909dc42009-07-01 12:27:21 +10005702 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703 }
NeilBrownefeb53c2009-01-09 08:31:10 +11005704 disk->major = MAJOR(mddev->unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005705 disk->first_minor = unit << shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005706 if (name)
5707 strcpy(disk->disk_name, name);
5708 else if (partitioned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005709 sprintf(disk->disk_name, "md_d%d", unit);
Greg Kroah-Hartmance7b0f462005-06-20 21:15:16 -07005710 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 sprintf(disk->disk_name, "md%d", unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005712 disk->fops = &md_fops;
5713 disk->private_data = mddev;
5714 disk->queue = mddev->queue;
Jens Axboe56883a72016-03-30 10:16:53 -06005715 blk_queue_write_cache(mddev->queue, true, true);
NeilBrown92850bb2008-10-21 13:25:32 +11005716 /* Allow extended partitions. This makes the
NeilBrownd3374822009-01-09 08:31:10 +11005717 * 'mdp' device redundant, but we can't really
NeilBrown92850bb2008-10-21 13:25:32 +11005718 * remove it now.
5719 */
5720 disk->flags |= GENHD_FL_EXT_DEVT;
Christoph Hellwiga564e232020-07-08 14:25:41 +02005721 disk->events |= DISK_EVENT_MEDIA_CHANGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005722 mddev->gendisk = disk;
NeilBrownb0140892011-05-10 17:49:01 +10005723 /* As soon as we call add_disk(), another thread could get
5724 * through to md_open, so make sure it doesn't get too far
5725 */
5726 mutex_lock(&mddev->open_mutex);
5727 add_disk(disk);
5728
Kent Overstreet28dec872018-06-07 20:52:54 -04005729 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
NeilBrown0909dc42009-07-01 12:27:21 +10005730 if (error) {
5731 /* This isn't possible, but as kobject_init_and_add is marked
5732 * __must_check, we must do something with the result
5733 */
NeilBrown9d487392016-11-02 14:16:49 +11005734 pr_debug("md: cannot register %s/md - name in use\n",
5735 disk->disk_name);
NeilBrown0909dc42009-07-01 12:27:21 +10005736 error = 0;
5737 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005738 if (mddev->kobj.sd &&
5739 sysfs_create_group(&mddev->kobj, &md_bitmap_group))
NeilBrown9d487392016-11-02 14:16:49 +11005740 pr_debug("pointless warning\n");
NeilBrownb0140892011-05-10 17:49:01 +10005741 mutex_unlock(&mddev->open_mutex);
NeilBrown0909dc42009-07-01 12:27:21 +10005742 abort:
5743 mutex_unlock(&disks_mutex);
NeilBrown00bcb4a2010-06-01 19:37:23 +10005744 if (!error && mddev->kobj.sd) {
Greg Kroah-Hartman3830c622007-12-17 15:54:39 -04005745 kobject_uevent(&mddev->kobj, KOBJ_ADD);
NeilBrown00bcb4a2010-06-01 19:37:23 +10005746 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07005747 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
NeilBrownb62b7592008-10-21 13:25:21 +11005748 }
NeilBrownd3374822009-01-09 08:31:10 +11005749 mddev_put(mddev);
NeilBrown0909dc42009-07-01 12:27:21 +10005750 return error;
NeilBrownefeb53c2009-01-09 08:31:10 +11005751}
5752
Christoph Hellwig28144f92020-10-29 15:58:34 +01005753static void md_probe(dev_t dev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005754{
Christoph Hellwig28144f92020-10-29 15:58:34 +01005755 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5756 return;
NeilBrown78b63502017-04-12 16:26:13 +10005757 if (create_on_open)
5758 md_alloc(dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005759}
5760
Kees Cooke4dca7b2017-10-17 19:04:42 -07005761static int add_named_array(const char *val, const struct kernel_param *kp)
NeilBrownefeb53c2009-01-09 08:31:10 +11005762{
NeilBrown039b7222017-04-12 16:26:13 +10005763 /*
5764 * val must be "md_*" or "mdNNN".
5765 * For "md_*" we allocate an array with a large free minor number, and
NeilBrownefeb53c2009-01-09 08:31:10 +11005766 * set the name to val. val must not already be an active name.
NeilBrown039b7222017-04-12 16:26:13 +10005767 * For "mdNNN" we allocate an array with the minor number NNN
5768 * which must not already be in use.
NeilBrownefeb53c2009-01-09 08:31:10 +11005769 */
5770 int len = strlen(val);
5771 char buf[DISK_NAME_LEN];
NeilBrown039b7222017-04-12 16:26:13 +10005772 unsigned long devnum;
NeilBrownefeb53c2009-01-09 08:31:10 +11005773
5774 while (len && val[len-1] == '\n')
5775 len--;
5776 if (len >= DISK_NAME_LEN)
5777 return -E2BIG;
5778 strlcpy(buf, val, len+1);
NeilBrown039b7222017-04-12 16:26:13 +10005779 if (strncmp(buf, "md_", 3) == 0)
5780 return md_alloc(0, buf);
5781 if (strncmp(buf, "md", 2) == 0 &&
5782 isdigit(buf[2]) &&
5783 kstrtoul(buf+2, 10, &devnum) == 0 &&
5784 devnum <= MINORMASK)
5785 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5786
5787 return -EINVAL;
NeilBrownefeb53c2009-01-09 08:31:10 +11005788}
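/*
 * Usage sketch: arrays can be created by name through the module
 * parameter handled here, e.g. writing "md_home" to
 * /sys/module/md_mod/parameters/new_array allocates an array with an
 * arbitrary minor, while "md4" requests minor 4 explicitly.
 */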
5789
Kees Cook8376d3c2017-10-16 17:01:48 -07005790static void md_safemode_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791{
Kees Cook8376d3c2017-10-16 17:01:48 -07005792 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005793
NeilBrown4ad23a972017-03-15 14:05:14 +11005794 mddev->safemode = 1;
5795 if (mddev->external)
5796 sysfs_notify_dirent_safe(mddev->sysfs_state);
5797
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798 md_wakeup_thread(mddev->thread);
5799}
5800
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005801static int start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005802
NeilBrownfd01b882011-10-11 16:47:53 +11005803int md_run(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005804{
NeilBrown2604b702006-01-06 00:20:36 -08005805 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11005806 struct md_rdev *rdev;
NeilBrown84fc4b52011-10-11 16:49:58 +11005807 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005808
NeilBrowna757e642005-04-16 15:26:42 -07005809 if (list_empty(&mddev->disks))
5810		/* cannot run an array with no devices. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005812
5813 if (mddev->pers)
5814 return -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10005815 /* Cannot run until previous stop completes properly */
5816 if (mddev->sysfs_active)
5817 return -EBUSY;
NeilBrownb6eb1272010-04-15 10:13:47 +10005818
Linus Torvalds1da177e2005-04-16 15:20:36 -07005819 /*
5820 * Analyze all RAID superblock(s)
5821 */
NeilBrown1ec4a932008-02-06 01:39:53 -08005822 if (!mddev->raid_disks) {
5823 if (!mddev->persistent)
5824 return -EINVAL;
Yufen Yu6a5cb532019-10-16 16:00:03 +08005825 err = analyze_sbs(mddev);
5826 if (err)
5827 return -EINVAL;
NeilBrown1ec4a932008-02-06 01:39:53 -08005828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829
NeilBrownd9d166c2006-01-06 00:20:51 -08005830 if (mddev->level != LEVEL_NONE)
5831 request_module("md-level-%d", mddev->level);
5832 else if (mddev->clevel[0])
5833 request_module("md-%s", mddev->clevel);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005834
5835 /*
5836 * Drop all container device buffers, from now on
5837 * the only valid external interface is through the md
5838 * device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005839 */
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005840 mddev->has_superblocks = false;
NeilBrowndafb20f2012-03-19 12:46:39 +11005841 rdev_for_each(rdev, mddev) {
NeilBrownb2d444d2005-11-08 21:39:31 -08005842 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005843 continue;
5844 sync_blockdev(rdev->bdev);
Peter Zijlstraf98393a2007-05-06 14:49:54 -07005845 invalidate_bdev(rdev->bdev);
NeilBrown97b20ef2017-04-13 08:53:48 +10005846 if (mddev->ro != 1 &&
5847 (bdev_read_only(rdev->bdev) ||
5848 bdev_read_only(rdev->meta_bdev))) {
5849 mddev->ro = 1;
5850 if (mddev->gendisk)
5851 set_disk_ro(mddev->gendisk, 1);
5852 }
NeilBrownf0d76d72007-07-17 04:06:12 -07005853
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005854 if (rdev->sb_page)
5855 mddev->has_superblocks = true;
5856
NeilBrownf0d76d72007-07-17 04:06:12 -07005857 /* perform some consistency tests on the device.
5858		 * We don't want the data to overlap the metadata.
Andre Noll58c0fed2009-03-31 14:33:13 +11005859 * Internal Bitmap issues have been handled elsewhere.
NeilBrownf0d76d72007-07-17 04:06:12 -07005860 */
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +11005861 if (rdev->meta_bdev) {
5862 /* Nothing to check */;
5863 } else if (rdev->data_offset < rdev->sb_start) {
Andre Noll58c0fed2009-03-31 14:33:13 +11005864 if (mddev->dev_sectors &&
5865 rdev->data_offset + mddev->dev_sectors
Andre Noll0f420352008-07-11 22:02:23 +10005866 > rdev->sb_start) {
NeilBrown9d487392016-11-02 14:16:49 +11005867 pr_warn("md: %s: data overlaps metadata\n",
5868 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005869 return -EINVAL;
5870 }
5871 } else {
Andre Noll0f420352008-07-11 22:02:23 +10005872 if (rdev->sb_start + rdev->sb_size/512
NeilBrownf0d76d72007-07-17 04:06:12 -07005873 > rdev->data_offset) {
NeilBrown9d487392016-11-02 14:16:49 +11005874 pr_warn("md: %s: metadata overlaps data\n",
5875 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005876 return -EINVAL;
5877 }
5878 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005879 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005880 }
5881
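	/* Set up the bio sets used for regular and sync I/O if not already done. */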
Kent Overstreetafeee512018-05-20 18:25:52 -04005882 if (!bioset_initialized(&mddev->bio_set)) {
5883 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5884 if (err)
5885 return err;
Ming Lei10273172017-02-14 23:29:00 +08005886 }
Kent Overstreetafeee512018-05-20 18:25:52 -04005887 if (!bioset_initialized(&mddev->sync_set)) {
5888 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5889 if (err)
Kent Overstreet28dec872018-06-07 20:52:54 -04005890 return err;
NeilBrown5a850712017-06-21 09:12:21 +10005891 }
NeilBrowna167f662010-10-26 18:31:13 +11005892
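	/* Look up the personality for this level and pin its module. */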
Linus Torvalds1da177e2005-04-16 15:20:36 -07005893 spin_lock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005894 pers = find_pers(mddev->level, mddev->clevel);
NeilBrown2604b702006-01-06 00:20:36 -08005895 if (!pers || !try_module_get(pers->owner)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896 spin_unlock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005897 if (mddev->level != LEVEL_NONE)
NeilBrown9d487392016-11-02 14:16:49 +11005898 pr_warn("md: personality for level %d is not loaded!\n",
5899 mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08005900 else
NeilBrown9d487392016-11-02 14:16:49 +11005901 pr_warn("md: personality for level %s is not loaded!\n",
5902 mddev->clevel);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005903 err = -EINVAL;
5904 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005906 spin_unlock(&pers_lock);
NeilBrown34817e82009-03-31 14:39:38 +11005907 if (mddev->level != pers->level) {
5908 mddev->level = pers->level;
5909 mddev->new_level = pers->level;
5910 }
NeilBrownd9d166c2006-01-06 00:20:51 -08005911 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005912
NeilBrownf6705572006-03-27 01:18:11 -08005913 if (mddev->reshape_position != MaxSector &&
NeilBrown63c70c42006-03-27 01:18:13 -08005914 pers->start_reshape == NULL) {
NeilBrownf6705572006-03-27 01:18:11 -08005915 /* This personality cannot handle reshaping... */
NeilBrownf6705572006-03-27 01:18:11 -08005916 module_put(pers->owner);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005917 err = -EINVAL;
5918 goto abort;
NeilBrownf6705572006-03-27 01:18:11 -08005919 }
5920
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005921 if (pers->sync_request) {
5922 /* Warn if this is a potentially silly
5923 * configuration.
5924 */
5925 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11005926 struct md_rdev *rdev2;
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005927 int warned = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005928
NeilBrowndafb20f2012-03-19 12:46:39 +11005929 rdev_for_each(rdev, mddev)
5930 rdev_for_each(rdev2, mddev) {
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005931 if (rdev < rdev2 &&
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02005932 rdev->bdev->bd_disk ==
5933 rdev2->bdev->bd_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11005934 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5935 mdname(mddev),
5936 bdevname(rdev->bdev,b),
5937 bdevname(rdev2->bdev,b2));
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005938 warned = 1;
5939 }
5940 }
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005941
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005942 if (warned)
NeilBrown9d487392016-11-02 14:16:49 +11005943 pr_warn("True protection against single-disk failure might be compromised.\n");
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005944 }
5945
NeilBrown657390d2005-08-26 18:34:16 -07005946 mddev->recovery = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11005947 /* may be over-ridden by personality */
5948 mddev->resync_max_sectors = mddev->dev_sectors;
5949
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005950 mddev->ok_start_degraded = start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005951
NeilBrown0f9552b52009-12-30 12:08:50 +11005952 if (start_readonly && mddev->ro == 0)
NeilBrownf91de922005-11-08 21:39:36 -08005953 mddev->ro = 2; /* read-only, but switch on first write */
5954
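	/* Hand over to the personality, then sanity check the array size it set. */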
NeilBrown36d091f2014-12-15 12:56:58 +11005955 err = pers->run(mddev);
Andre Noll13e53df2008-03-26 00:07:03 +01005956 if (err)
NeilBrown9d487392016-11-02 14:16:49 +11005957 pr_warn("md: pers->run() failed ...\n");
NeilBrown36d091f2014-12-15 12:56:58 +11005958 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
NeilBrown9d487392016-11-02 14:16:49 +11005959 WARN_ONCE(!mddev->external_size,
5960 "%s: default size too small, but 'external_size' not in effect?\n",
5961 __func__);
5962 pr_warn("md: invalid array_size %llu > default size %llu\n",
5963 (unsigned long long)mddev->array_sectors / 2,
5964 (unsigned long long)pers->size(mddev, 0, 0) / 2);
Dan Williamsb522adc2009-03-31 15:00:31 +11005965 err = -EINVAL;
Dan Williamsb522adc2009-03-31 15:00:31 +11005966 }
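	/* Create the bitmap now if one was configured via a file or an offset. */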
NeilBrown36d091f2014-12-15 12:56:58 +11005967 if (err == 0 && pers->sync_request &&
NeilBrownef99bf42012-05-22 13:55:08 +10005968 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005969 struct bitmap *bitmap;
5970
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07005971 bitmap = md_bitmap_create(mddev, -1);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005972 if (IS_ERR(bitmap)) {
5973 err = PTR_ERR(bitmap);
NeilBrown9d487392016-11-02 14:16:49 +11005974 pr_warn("%s: failed to create bitmap (%d)\n",
5975 mdname(mddev), err);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005976 } else
5977 mddev->bitmap = bitmap;
5978
NeilBrownb15c2e52006-01-06 00:20:16 -08005979 }
Guoqing Jiangd4945492019-06-14 17:10:39 +08005980 if (err)
5981 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005982
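	/* Write-mostly devices doing write-behind need the serialization pool. */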
5983 if (mddev->bitmap_info.max_write_behind > 0) {
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005984 bool create_pool = false;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005985
5986 rdev_for_each(rdev, mddev) {
5987 if (test_bit(WriteMostly, &rdev->flags) &&
Guoqing Jiang404659c2019-12-23 10:48:53 +01005988 rdev_init_serial(rdev))
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005989 create_pool = true;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005990 }
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005991 if (create_pool && mddev->serial_info_pool == NULL) {
Guoqing Jiang404659c2019-12-23 10:48:53 +01005992 mddev->serial_info_pool =
5993 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
5994 sizeof(struct serial_info));
5995 if (!mddev->serial_info_pool) {
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005996 err = -ENOMEM;
Guoqing Jiangd4945492019-06-14 17:10:39 +08005997 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005998 }
5999 }
6000 }
6001
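	/* Flag the md queue non-rotational only if every member is non-rotational and the array is not degraded. */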
NeilBrown5c675f82014-12-15 12:56:56 +11006002 if (mddev->queue) {
Shaohua Libb086a82016-09-30 09:45:40 -07006003 bool nonrot = true;
6004
6005 rdev_for_each(rdev, mddev) {
6006 if (rdev->raid_disk >= 0 &&
6007 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6008 nonrot = false;
6009 break;
6010 }
6011 }
6012 if (mddev->degraded)
6013 nonrot = false;
6014 if (nonrot)
Bart Van Assche8b904b52018-03-07 17:10:10 -08006015 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
Shaohua Libb086a82016-09-30 09:45:40 -07006016 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08006017 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
NeilBrown5c675f82014-12-15 12:56:56 +11006018 }
NeilBrown36d091f2014-12-15 12:56:58 +11006019 if (pers->sync_request) {
NeilBrown00bcb4a2010-06-01 19:37:23 +10006020 if (mddev->kobj.sd &&
6021 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11006022 pr_warn("md: cannot register extra attributes for %s\n",
6023 mdname(mddev));
NeilBrown00bcb4a2010-06-01 19:37:23 +10006024 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07006025 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6026 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrown5e55e2f2007-03-26 21:32:14 -08006027 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
NeilBrownfd9d49c2005-11-08 21:39:42 -08006028 mddev->ro = 0;
6029
Robert Becker1e509152009-12-14 12:49:58 +11006030 atomic_set(&mddev->max_corr_read_errors,
6031 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006032 mddev->safemode = 0;
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006033 if (mddev_is_clustered(mddev))
6034 mddev->safemode_delay = 0;
6035 else
Zhao Heming7c9d5c52020-07-21 02:08:52 +08006036 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006037 mddev->in_sync = 1;
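	/* Make the setup above visible before ->pers is published below. */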
NeilBrown0ca69882011-01-14 09:14:33 +11006038 smp_wmb();
NeilBrown36d091f2014-12-15 12:56:58 +11006039 spin_lock(&mddev->lock);
6040 mddev->pers = pers;
NeilBrown36d091f2014-12-15 12:56:58 +11006041 spin_unlock(&mddev->lock);
NeilBrowndafb20f2012-03-19 12:46:39 +11006042 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006043 if (rdev->raid_disk >= 0)
Yufen Yue5b521e2019-06-14 15:41:07 -07006044 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006045
NeilBrowna4a3d262015-07-17 11:57:30 +10006046 if (mddev->degraded && !mddev->ro)
6047 /* This ensures that recovering status is reported immediately
6048 * via sysfs - until a lack of spares is confirmed.
6049 */
6050 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006051 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf72ffdd2014-09-30 14:23:59 +10006052
Shaohua Li29530792016-12-08 15:48:19 -08006053 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07006054 md_update_sb(mddev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006055
NeilBrownd7603b72006-01-06 00:20:30 -08006056 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006057 return 0;
Xiao Nib1261942018-01-24 12:17:38 +08006058
Guoqing Jiangd4945492019-06-14 17:10:39 +08006059bitmap_abort:
6060 mddev_detach(mddev);
6061 if (mddev->private)
6062 pers->free(mddev, mddev->private);
6063 mddev->private = NULL;
6064 module_put(pers->owner);
6065 md_bitmap_destroy(mddev);
Xiao Nib1261942018-01-24 12:17:38 +08006066abort:
NeilBrown4bc034d2019-03-29 10:46:16 -07006067 bioset_exit(&mddev->bio_set);
6068 bioset_exit(&mddev->sync_set);
Xiao Nib1261942018-01-24 12:17:38 +08006069 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070}
NeilBrown390ee602010-06-01 19:37:27 +10006071EXPORT_SYMBOL_GPL(md_run);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006073int do_md_run(struct mddev *mddev)
NeilBrownfe60b012010-03-29 11:10:42 +11006074{
6075 int err;
6076
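	/* Keep array_state from reporting the array as active until setup finishes. */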
NeilBrown9d4b45d2019-08-20 10:21:09 +10006077 set_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006078 err = md_run(mddev);
6079 if (err)
6080 goto out;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006081 err = md_bitmap_load(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006082 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006083 md_bitmap_destroy(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006084 goto out;
6085 }
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006086
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006087 if (mddev_is_clustered(mddev))
6088 md_allow_write(mddev);
6089
Song Liud5d885f2017-11-19 22:17:01 -08006090 /* run start up tasks that require md_thread */
6091 md_start(mddev);
6092
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006093 md_wakeup_thread(mddev->thread);
6094 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6095
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006096 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006097 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006098 mddev->changed = 1;
NeilBrownfe60b012010-03-29 11:10:42 +11006099 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006100 sysfs_notify_dirent_safe(mddev->sysfs_state);
6101 sysfs_notify_dirent_safe(mddev->sysfs_action);
Junxiao Bie1a86db2020-07-14 16:10:26 -07006102 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownfe60b012010-03-29 11:10:42 +11006103out:
NeilBrown9d4b45d2019-08-20 10:21:09 +10006104 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006105 return err;
6106}
6107
Song Liud5d885f2017-11-19 22:17:01 -08006108int md_start(struct mddev *mddev)
6109{
6110 int ret = 0;
6111
6112 if (mddev->pers->start) {
6113 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6114 md_wakeup_thread(mddev->thread);
6115 ret = mddev->pers->start(mddev);
6116 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6117 md_wakeup_thread(mddev->sync_thread);
6118 }
6119 return ret;
6120}
6121EXPORT_SYMBOL_GPL(md_start);
6122
NeilBrownfd01b882011-10-11 16:47:53 +11006123static int restart_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006124{
6125 struct gendisk *disk = mddev->gendisk;
NeilBrown97b20ef2017-04-13 08:53:48 +10006126 struct md_rdev *rdev;
6127 bool has_journal = false;
6128 bool has_readonly = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006129
Andre Noll80fab1d2008-07-11 22:02:21 +10006130 /* Complain if it has no devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006131 if (list_empty(&mddev->disks))
Andre Noll80fab1d2008-07-11 22:02:21 +10006132 return -ENXIO;
6133 if (!mddev->pers)
6134 return -EINVAL;
6135 if (!mddev->ro)
6136 return -EBUSY;
Song Liu339421d2015-10-08 21:54:13 -07006137
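	/* A missing/faulty journal or any read-only member blocks a read-write restart. */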
NeilBrown97b20ef2017-04-13 08:53:48 +10006138 rcu_read_lock();
6139 rdev_for_each_rcu(rdev, mddev) {
6140 if (test_bit(Journal, &rdev->flags) &&
6141 !test_bit(Faulty, &rdev->flags))
6142 has_journal = true;
6143 if (bdev_read_only(rdev->bdev))
6144 has_readonly = true;
Song Liu339421d2015-10-08 21:54:13 -07006145 }
NeilBrown97b20ef2017-04-13 08:53:48 +10006146 rcu_read_unlock();
6147 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6148 /* Don't restart rw with journal missing/faulty */
6149 return -EINVAL;
6150 if (has_readonly)
6151 return -EROFS;
Song Liu339421d2015-10-08 21:54:13 -07006152
Andre Noll80fab1d2008-07-11 22:02:21 +10006153 mddev->safemode = 0;
6154 mddev->ro = 0;
6155 set_disk_ro(disk, 0);
NeilBrown9d487392016-11-02 14:16:49 +11006156 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
Andre Noll80fab1d2008-07-11 22:02:21 +10006157 /* Kick recovery or resync if necessary */
6158 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6159 md_wakeup_thread(mddev->thread);
6160 md_wakeup_thread(mddev->sync_thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006161 sysfs_notify_dirent_safe(mddev->sysfs_state);
Andre Noll80fab1d2008-07-11 22:02:21 +10006162 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006163}
6164
NeilBrownfd01b882011-10-11 16:47:53 +11006165static void md_clean(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006166{
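	/* Reset the mddev to a pristine state once the array has been stopped. */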
6167 mddev->array_sectors = 0;
6168 mddev->external_size = 0;
6169 mddev->dev_sectors = 0;
6170 mddev->raid_disks = 0;
6171 mddev->recovery_cp = 0;
6172 mddev->resync_min = 0;
6173 mddev->resync_max = MaxSector;
6174 mddev->reshape_position = MaxSector;
6175 mddev->external = 0;
6176 mddev->persistent = 0;
6177 mddev->level = LEVEL_NONE;
6178 mddev->clevel[0] = 0;
6179 mddev->flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08006180 mddev->sb_flags = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006181 mddev->ro = 0;
6182 mddev->metadata_type[0] = 0;
6183 mddev->chunk_sectors = 0;
6184 mddev->ctime = mddev->utime = 0;
6185 mddev->layout = 0;
6186 mddev->max_disks = 0;
6187 mddev->events = 0;
NeilBrowna8707c02010-05-18 09:28:43 +10006188 mddev->can_decrease_events = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006189 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10006190 mddev->reshape_backwards = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006191 mddev->new_level = LEVEL_NONE;
6192 mddev->new_layout = 0;
6193 mddev->new_chunk_sectors = 0;
6194 mddev->curr_resync = 0;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11006195 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown6177b472010-03-29 11:37:13 +11006196 mddev->suspend_lo = mddev->suspend_hi = 0;
6197 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6198 mddev->recovery = 0;
6199 mddev->in_sync = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006200 mddev->changed = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006201 mddev->degraded = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006202 mddev->safemode = 0;
NeilBrownbd691922015-06-25 17:01:40 +10006203 mddev->private = NULL;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006204 mddev->cluster_info = NULL;
NeilBrown6177b472010-03-29 11:37:13 +11006205 mddev->bitmap_info.offset = 0;
6206 mddev->bitmap_info.default_offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10006207 mddev->bitmap_info.default_space = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006208 mddev->bitmap_info.chunksize = 0;
6209 mddev->bitmap_info.daemon_sleep = 0;
6210 mddev->bitmap_info.max_write_behind = 0;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006211 mddev->bitmap_info.nodes = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006212}
6213
NeilBrownfd01b882011-10-11 16:47:53 +11006214static void __md_stop_writes(struct mddev *mddev)
NeilBrowna047e122010-03-29 12:07:53 +11006215{
NeilBrown6b6204e2013-05-09 09:48:30 +10006216 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Guoqing Jiang21e09582020-04-04 23:57:07 +02006217 if (work_pending(&mddev->del_work))
6218 flush_workqueue(md_misc_wq);
NeilBrowna047e122010-03-29 12:07:53 +11006219 if (mddev->sync_thread) {
NeilBrowna047e122010-03-29 12:07:53 +11006220 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10006221 md_reap_sync_thread(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006222 }
6223
6224 del_timer_sync(&mddev->safemode_timer);
6225
Shaohua Li034e33f2016-11-21 10:29:19 -08006226 if (mddev->pers && mddev->pers->quiesce) {
6227 mddev->pers->quiesce(mddev, 1);
6228 mddev->pers->quiesce(mddev, 0);
6229 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006230 md_bitmap_flush(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006231
NeilBrownb6d428c2013-04-24 11:42:42 +10006232 if (mddev->ro == 0 &&
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006233 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
Shaohua Li29530792016-12-08 15:48:19 -08006234 mddev->sb_flags)) {
NeilBrowna047e122010-03-29 12:07:53 +11006235 /* mark array as shutdown cleanly */
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006236 if (!mddev_is_clustered(mddev))
6237 mddev->in_sync = 1;
NeilBrowna047e122010-03-29 12:07:53 +11006238 md_update_sb(mddev, 1);
6239 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +01006240 /* disable policy to guarantee rdevs free resources for serialization */
6241 mddev->serialize_policy = 0;
6242 mddev_destroy_serial_pool(mddev, NULL, true);
NeilBrowna047e122010-03-29 12:07:53 +11006243}
NeilBrowndefad612011-01-14 09:14:33 +11006244
NeilBrownfd01b882011-10-11 16:47:53 +11006245void md_stop_writes(struct mddev *mddev)
NeilBrowndefad612011-01-14 09:14:33 +11006246{
NeilBrown29f097c2013-11-14 17:54:51 +11006247 mddev_lock_nointr(mddev);
NeilBrowndefad612011-01-14 09:14:33 +11006248 __md_stop_writes(mddev);
6249 mddev_unlock(mddev);
6250}
NeilBrown390ee602010-06-01 19:37:27 +10006251EXPORT_SYMBOL_GPL(md_stop_writes);
NeilBrowna047e122010-03-29 12:07:53 +11006252
NeilBrown5aa61f42014-12-15 12:56:57 +11006253static void mddev_detach(struct mddev *mddev)
6254{
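	/* Wait for behind writes, quiesce the personality and stop the md thread. */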
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006255 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang6b40bec2020-02-11 11:10:04 +01006256 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
NeilBrown5aa61f42014-12-15 12:56:57 +11006257 mddev->pers->quiesce(mddev, 1);
6258 mddev->pers->quiesce(mddev, 0);
6259 }
6260 md_unregister_thread(&mddev->thread);
6261 if (mddev->queue)
6262 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6263}
6264
NeilBrown5eff3c42012-11-19 10:47:48 +11006265static void __md_stop(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006266{
NeilBrown36d091f2014-12-15 12:56:58 +11006267 struct md_personality *pers = mddev->pers;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006268 md_bitmap_destroy(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11006269 mddev_detach(mddev);
NeilBrownee5d0042015-07-22 10:20:07 +10006270 /* Ensure ->event_work is done */
Guoqing Jiang21e09582020-04-04 23:57:07 +02006271 if (mddev->event_work.func)
6272 flush_workqueue(md_misc_wq);
NeilBrown36d091f2014-12-15 12:56:58 +11006273 spin_lock(&mddev->lock);
NeilBrown6177b472010-03-29 11:37:13 +11006274 mddev->pers = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006275 spin_unlock(&mddev->lock);
6276 pers->free(mddev, mddev->private);
NeilBrownbd691922015-06-25 17:01:40 +10006277 mddev->private = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006278 if (pers->sync_request && mddev->to_remove == NULL)
6279 mddev->to_remove = &md_redundancy_group;
6280 module_put(pers->owner);
NeilBrowncca9cf92010-04-01 12:08:16 +11006281 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Jack Wang6aaa58c2018-10-19 16:21:31 +02006282}
6283
6284void md_stop(struct mddev *mddev)
6285{
6286 /* stop the array and free any attached data structures.
6287 * This is called from dm-raid.
6288 */
6289 __md_stop(mddev);
Kent Overstreetafeee512018-05-20 18:25:52 -04006290 bioset_exit(&mddev->bio_set);
6291 bioset_exit(&mddev->sync_set);
NeilBrown5eff3c42012-11-19 10:47:48 +11006292}
6293
NeilBrown390ee602010-06-01 19:37:27 +10006294EXPORT_SYMBOL_GPL(md_stop);
NeilBrown6177b472010-03-29 11:37:13 +11006295
NeilBrowna05b7ea2012-07-19 15:59:18 +10006296static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
NeilBrowna4bd82d2010-03-29 13:23:10 +11006297{
6298 int err = 0;
NeilBrown30b8feb2013-11-14 15:16:17 +11006299 int did_freeze = 0;
6300
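	/* Freeze sync/recovery while switching to read-only; undo the freeze on failure. */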
6301 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6302 did_freeze = 1;
6303 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6304 md_wakeup_thread(mddev->thread);
6305 }
NeilBrownf851b602014-12-11 10:02:10 +11006306 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006307 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006308 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006309 /* Thread might be blocked waiting for metadata update
6310 * which will now never happen */
6311 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006312
Shaohua Li29530792016-12-08 15:48:19 -08006313 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrown88724bf2015-09-24 14:00:51 +10006314 return -EBUSY;
NeilBrown30b8feb2013-11-14 15:16:17 +11006315 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006316 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6317 &mddev->recovery));
NeilBrown88724bf2015-09-24 14:00:51 +10006318 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08006319 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown30b8feb2013-11-14 15:16:17 +11006320 mddev_lock_nointr(mddev);
6321
NeilBrowna4bd82d2010-03-29 13:23:10 +11006322 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006323 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006324 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006325 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006326 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown30b8feb2013-11-14 15:16:17 +11006327 if (did_freeze) {
6328 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006329 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006330 md_wakeup_thread(mddev->thread);
6331 }
NeilBrowna4bd82d2010-03-29 13:23:10 +11006332 err = -EBUSY;
6333 goto out;
6334 }
6335 if (mddev->pers) {
NeilBrowndefad612011-01-14 09:14:33 +11006336 __md_stop_writes(mddev);
NeilBrowna4bd82d2010-03-29 13:23:10 +11006337
6338 err = -ENXIO;
6339 if (mddev->ro==1)
6340 goto out;
6341 mddev->ro = 1;
6342 set_disk_ro(mddev->gendisk, 1);
6343 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006344 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6345 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006346 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown30b8feb2013-11-14 15:16:17 +11006347 err = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006348 }
6349out:
6350 mutex_unlock(&mddev->open_mutex);
6351 return err;
6352}
6353
NeilBrown9e653b62006-06-26 00:27:58 -07006354/* mode:
6355 * 0 - completely stop and disassemble array
NeilBrown9e653b62006-06-26 00:27:58 -07006356 * 2 - stop but do not disassemble array
6357 */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006358static int do_md_stop(struct mddev *mddev, int mode,
NeilBrowna05b7ea2012-07-19 15:59:18 +10006359 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006361 struct gendisk *disk = mddev->gendisk;
NeilBrown3cb03002011-10-11 16:45:26 +11006362 struct md_rdev *rdev;
NeilBrown30b8feb2013-11-14 15:16:17 +11006363 int did_freeze = 0;
6364
6365 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6366 did_freeze = 1;
6367 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6368 md_wakeup_thread(mddev->thread);
6369 }
NeilBrownf851b602014-12-11 10:02:10 +11006370 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006371 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006372 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006373 /* Thread might be blocked waiting for metadata update
6374 * which will now never happen */
6375 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006376
NeilBrown30b8feb2013-11-14 15:16:17 +11006377 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006378 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6379 !test_bit(MD_RECOVERY_RUNNING,
6380 &mddev->recovery)));
NeilBrown30b8feb2013-11-14 15:16:17 +11006381 mddev_lock_nointr(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382
NeilBrownc8c00a62009-08-10 12:50:52 +10006383 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006384 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006385 mddev->sysfs_active ||
6386 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006387 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006388 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown6e17b022010-08-07 21:41:19 +10006389 mutex_unlock(&mddev->open_mutex);
NeilBrown30b8feb2013-11-14 15:16:17 +11006390 if (did_freeze) {
6391 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006392 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006393 md_wakeup_thread(mddev->thread);
6394 }
NeilBrown260fa032013-08-27 16:44:13 +10006395 return -EBUSY;
6396 }
NeilBrown6e17b022010-08-07 21:41:19 +10006397 if (mddev->pers) {
NeilBrowna4bd82d2010-03-29 13:23:10 +11006398 if (mddev->ro)
6399 set_disk_ro(disk, 0);
NeilBrown409c57f2009-03-31 14:39:39 +11006400
NeilBrowndefad612011-01-14 09:14:33 +11006401 __md_stop_writes(mddev);
NeilBrown5eff3c42012-11-19 10:47:48 +11006402 __md_stop(mddev);
NeilBrown6177b472010-03-29 11:37:13 +11006403
NeilBrowna4bd82d2010-03-29 13:23:10 +11006404 /* tell userspace to handle 'inactive' */
NeilBrown00bcb4a2010-06-01 19:37:23 +10006405 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown0d4ca602006-12-10 02:20:44 -08006406
NeilBrowndafb20f2012-03-19 12:46:39 +11006407 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006408 if (rdev->raid_disk >= 0)
6409 sysfs_unlink_rdev(mddev, rdev);
NeilBrownc4647292009-05-07 12:51:06 +10006410
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006411 set_capacity_and_notify(disk, 0);
NeilBrown6e17b022010-08-07 21:41:19 +10006412 mutex_unlock(&mddev->open_mutex);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006413 mddev->changed = 1;
NeilBrown0d4ca602006-12-10 02:20:44 -08006414
NeilBrowna4bd82d2010-03-29 13:23:10 +11006415 if (mddev->ro)
6416 mddev->ro = 0;
NeilBrown6e17b022010-08-07 21:41:19 +10006417 } else
6418 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006419 /*
6420 * Free resources if final stop
6421 */
NeilBrown9e653b62006-06-26 00:27:58 -07006422 if (mode == 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006423 pr_info("md: %s stopped.\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006424
NeilBrownc3d97142009-12-14 12:49:52 +11006425 if (mddev->bitmap_info.file) {
NeilBrown4af1a042014-12-15 12:57:00 +11006426 struct file *f = mddev->bitmap_info.file;
6427 spin_lock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11006428 mddev->bitmap_info.file = NULL;
NeilBrown4af1a042014-12-15 12:57:00 +11006429 spin_unlock(&mddev->lock);
6430 fput(f);
NeilBrown978f9462006-02-02 14:28:05 -08006431 }
NeilBrownc3d97142009-12-14 12:49:52 +11006432 mddev->bitmap_info.offset = 0;
NeilBrown978f9462006-02-02 14:28:05 -08006433
Linus Torvalds1da177e2005-04-16 15:20:36 -07006434 export_array(mddev);
6435
NeilBrown6177b472010-03-29 11:37:13 +11006436 md_clean(mddev);
NeilBrownefeb53c2009-01-09 08:31:10 +11006437 if (mddev->hold_active == UNTIL_STOP)
6438 mddev->hold_active = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006439 }
NeilBrownd7603b72006-01-06 00:20:30 -08006440 md_new_event(mddev);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006441 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown6e17b022010-08-07 21:41:19 +10006442 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006443}
6444
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006445#ifndef MODULE
NeilBrownfd01b882011-10-11 16:47:53 +11006446static void autorun_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006447{
NeilBrown3cb03002011-10-11 16:45:26 +11006448 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006449 int err;
6450
NeilBrowna757e642005-04-16 15:26:42 -07006451 if (list_empty(&mddev->disks))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006452 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006453
NeilBrown9d487392016-11-02 14:16:49 +11006454 pr_info("md: running: ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006455
NeilBrowndafb20f2012-03-19 12:46:39 +11006456 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006457 char b[BDEVNAME_SIZE];
NeilBrown9d487392016-11-02 14:16:49 +11006458 pr_cont("<%s>", bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006459 }
NeilBrown9d487392016-11-02 14:16:49 +11006460 pr_cont("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006461
NeilBrownd710e132008-10-13 11:55:12 +11006462 err = do_md_run(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006463 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11006464 pr_warn("md: do_md_run() returned %d\n", err);
NeilBrowna05b7ea2012-07-19 15:59:18 +10006465 do_md_stop(mddev, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006466 }
6467}
6468
6469/*
6470 * let's try to run arrays based on all disks that have arrived
6471 * until now. (those are in pending_raid_disks)
6472 *
6473 * the method: pick the first pending disk, collect all disks with
6474 * the same UUID, remove all from the pending list and put them into
6475 * the 'same_array' list. Then order this list based on superblock
6476 * update time (freshest comes first), kick out 'old' disks and
6477 * compare superblocks. If everything's fine then run it.
6478 *
6479 * If "unit" is allocated, then bump its reference count
6480 */
6481static void autorun_devices(int part)
6482{
NeilBrown3cb03002011-10-11 16:45:26 +11006483 struct md_rdev *rdev0, *rdev, *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11006484 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006485 char b[BDEVNAME_SIZE];
6486
NeilBrown9d487392016-11-02 14:16:49 +11006487 pr_info("md: autorun ...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006488 while (!list_empty(&pending_raid_disks)) {
NeilBrowne8703fe2006-10-03 01:15:59 -07006489 int unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006490 dev_t dev;
NeilBrownad01c9e2006-03-27 01:18:07 -08006491 LIST_HEAD(candidates);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006492 rdev0 = list_entry(pending_raid_disks.next,
NeilBrown3cb03002011-10-11 16:45:26 +11006493 struct md_rdev, same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006494
NeilBrown9d487392016-11-02 14:16:49 +11006495 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006496 INIT_LIST_HEAD(&candidates);
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006497 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006498 if (super_90_load(rdev, rdev0, 0) >= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006499 pr_debug("md: adding %s ...\n",
6500 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006501 list_move(&rdev->same_set, &candidates);
6502 }
6503 /*
6504 * now we have a set of devices, with all of them having
6505 * mostly sane superblocks. It's time to allocate the
6506 * mddev.
6507 */
NeilBrowne8703fe2006-10-03 01:15:59 -07006508 if (part) {
6509 dev = MKDEV(mdp_major,
6510 rdev0->preferred_minor << MdpMinorShift);
6511 unit = MINOR(dev) >> MdpMinorShift;
6512 } else {
6513 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6514 unit = MINOR(dev);
6515 }
6516 if (rdev0->preferred_minor != unit) {
NeilBrown9d487392016-11-02 14:16:49 +11006517 pr_warn("md: unit number in %s is bad: %d\n",
6518 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519 break;
6520 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006521
Christoph Hellwig28144f92020-10-29 15:58:34 +01006522 md_probe(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006523 mddev = mddev_find(dev);
Neil Brown9bbbca32008-06-28 08:31:17 +10006524 if (!mddev || !mddev->gendisk) {
6525 if (mddev)
6526 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006527 break;
6528 }
NeilBrownf72ffdd2014-09-30 14:23:59 +10006529 if (mddev_lock(mddev))
NeilBrown9d487392016-11-02 14:16:49 +11006530 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006531 else if (mddev->raid_disks || mddev->major_version
6532 || !list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11006533 pr_warn("md: %s already running, cannot run %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006534 mdname(mddev), bdevname(rdev0->bdev,b));
6535 mddev_unlock(mddev);
6536 } else {
NeilBrown9d487392016-11-02 14:16:49 +11006537 pr_debug("md: created %s\n", mdname(mddev));
NeilBrown1ec4a932008-02-06 01:39:53 -08006538 mddev->persistent = 1;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006539 rdev_for_each_list(rdev, tmp, &candidates) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006540 list_del_init(&rdev->same_set);
6541 if (bind_rdev_to_array(rdev, mddev))
6542 export_rdev(rdev);
6543 }
6544 autorun_array(mddev);
6545 mddev_unlock(mddev);
6546 }
6547 /* on success, candidates will be empty; on error
6548 * it won't...
6549 */
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006550 rdev_for_each_list(rdev, tmp, &candidates) {
NeilBrown4b809912008-07-21 17:05:25 +10006551 list_del_init(&rdev->same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006552 export_rdev(rdev);
NeilBrown4b809912008-07-21 17:05:25 +10006553 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006554 mddev_put(mddev);
6555 }
NeilBrown9d487392016-11-02 14:16:49 +11006556 pr_info("md: ... autorun DONE.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006557}
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006558#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006559
NeilBrownf72ffdd2014-09-30 14:23:59 +10006560static int get_version(void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006561{
6562 mdu_version_t ver;
6563
6564 ver.major = MD_MAJOR_VERSION;
6565 ver.minor = MD_MINOR_VERSION;
6566 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6567
6568 if (copy_to_user(arg, &ver, sizeof(ver)))
6569 return -EFAULT;
6570
6571 return 0;
6572}
6573
NeilBrownf72ffdd2014-09-30 14:23:59 +10006574static int get_array_info(struct mddev *mddev, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006575{
6576 mdu_array_info_t info;
NeilBrowna9f326e2009-09-23 18:06:41 +10006577 int nr,working,insync,failed,spare;
NeilBrown3cb03002011-10-11 16:45:26 +11006578 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006579
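	/* Tally member devices by state under RCU. */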
NeilBrown1ca69c42012-10-11 13:37:33 +11006580 nr = working = insync = failed = spare = 0;
6581 rcu_read_lock();
6582 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006583 nr++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006584 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006585 failed++;
6586 else {
6587 working++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006588 if (test_bit(In_sync, &rdev->flags))
NeilBrownf72ffdd2014-09-30 14:23:59 +10006589 insync++;
Song Liub347af82016-08-11 17:14:45 -07006590 else if (test_bit(Journal, &rdev->flags))
6591 /* TODO: add journal count to md_u.h */
6592 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593 else
6594 spare++;
6595 }
6596 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006597 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006598
6599 info.major_version = mddev->major_version;
6600 info.minor_version = mddev->minor_version;
6601 info.patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006602 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006603 info.level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11006604 info.size = mddev->dev_sectors / 2;
6605 if (info.size != mddev->dev_sectors / 2) /* overflow */
NeilBrown284ae7c2006-02-03 03:03:40 -08006606 info.size = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006607 info.nr_disks = nr;
6608 info.raid_disks = mddev->raid_disks;
6609 info.md_minor = mddev->md_minor;
6610 info.not_persistent= !mddev->persistent;
6611
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006612 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006613 info.state = 0;
6614 if (mddev->in_sync)
6615 info.state = (1<<MD_SB_CLEAN);
NeilBrownc3d97142009-12-14 12:49:52 +11006616 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown9bd35922014-07-02 11:35:06 +10006617 info.state |= (1<<MD_SB_BITMAP_PRESENT);
Goldwyn Rodriguesca8895d2014-11-26 12:22:03 -06006618 if (mddev_is_clustered(mddev))
6619 info.state |= (1<<MD_SB_CLUSTERED);
NeilBrowna9f326e2009-09-23 18:06:41 +10006620 info.active_disks = insync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621 info.working_disks = working;
6622 info.failed_disks = failed;
6623 info.spare_disks = spare;
6624
6625 info.layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10006626 info.chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006627
6628 if (copy_to_user(arg, &info, sizeof(info)))
6629 return -EFAULT;
6630
6631 return 0;
6632}
6633
NeilBrownf72ffdd2014-09-30 14:23:59 +10006634static int get_bitmap_file(struct mddev *mddev, void __user * arg)
NeilBrown32a76272005-06-21 17:17:14 -07006635{
6636 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
NeilBrownf4ad3d32014-12-15 12:57:00 +11006637 char *ptr;
NeilBrown4af1a042014-12-15 12:57:00 +11006638 int err;
NeilBrown32a76272005-06-21 17:17:14 -07006639
Benjamin Randazzob6878d92015-07-25 16:36:50 +02006640 file = kzalloc(sizeof(*file), GFP_NOIO);
NeilBrown32a76272005-06-21 17:17:14 -07006641 if (!file)
NeilBrown4af1a042014-12-15 12:57:00 +11006642 return -ENOMEM;
NeilBrown32a76272005-06-21 17:17:14 -07006643
NeilBrown32a76272005-06-21 17:17:14 -07006644 err = 0;
NeilBrown4af1a042014-12-15 12:57:00 +11006645 spin_lock(&mddev->lock);
Benjamin Randazzo25eafe12015-07-25 16:36:50 +02006646 /* bitmap enabled */
6647 if (mddev->bitmap_info.file) {
6648 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6649 sizeof(file->pathname));
6650 if (IS_ERR(ptr))
6651 err = PTR_ERR(ptr);
6652 else
6653 memmove(file->pathname, ptr,
6654 sizeof(file->pathname)-(ptr-file->pathname));
6655 }
NeilBrown4af1a042014-12-15 12:57:00 +11006656 spin_unlock(&mddev->lock);
6657
6658 if (err == 0 &&
6659 copy_to_user(arg, file, sizeof(*file)))
NeilBrown32a76272005-06-21 17:17:14 -07006660 err = -EFAULT;
NeilBrown4af1a042014-12-15 12:57:00 +11006661
NeilBrown32a76272005-06-21 17:17:14 -07006662 kfree(file);
6663 return err;
6664}
6665
NeilBrownf72ffdd2014-09-30 14:23:59 +10006666static int get_disk_info(struct mddev *mddev, void __user * arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667{
6668 mdu_disk_info_t info;
NeilBrown3cb03002011-10-11 16:45:26 +11006669 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006670
6671 if (copy_from_user(&info, arg, sizeof(info)))
6672 return -EFAULT;
6673
NeilBrown1ca69c42012-10-11 13:37:33 +11006674 rcu_read_lock();
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05006675 rdev = md_find_rdev_nr_rcu(mddev, info.number);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006676 if (rdev) {
6677 info.major = MAJOR(rdev->bdev->bd_dev);
6678 info.minor = MINOR(rdev->bdev->bd_dev);
6679 info.raid_disk = rdev->raid_disk;
6680 info.state = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08006681 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006682 info.state |= (1<<MD_DISK_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08006683 else if (test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006684 info.state |= (1<<MD_DISK_ACTIVE);
6685 info.state |= (1<<MD_DISK_SYNC);
6686 }
Shaohua Li9efdca12015-10-12 16:59:50 -07006687 if (test_bit(Journal, &rdev->flags))
Song Liubac624f2015-08-13 14:31:55 -07006688 info.state |= (1<<MD_DISK_JOURNAL);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006689 if (test_bit(WriteMostly, &rdev->flags))
6690 info.state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11006691 if (test_bit(FailFast, &rdev->flags))
6692 info.state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006693 } else {
6694 info.major = info.minor = 0;
6695 info.raid_disk = -1;
6696 info.state = (1<<MD_DISK_REMOVED);
6697 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006698 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699
6700 if (copy_to_user(arg, &info, sizeof(info)))
6701 return -EFAULT;
6702
6703 return 0;
6704}
6705
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006706int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006707{
6708 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006709 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006710 dev_t dev = MKDEV(info->major,info->minor);
6711
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006712 if (mddev_is_clustered(mddev) &&
6713 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
NeilBrown9d487392016-11-02 14:16:49 +11006714 pr_warn("%s: Cannot add to clustered mddev.\n",
6715 mdname(mddev));
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006716 return -EINVAL;
6717 }
6718
Linus Torvalds1da177e2005-04-16 15:20:36 -07006719 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6720 return -EOVERFLOW;
6721
6722 if (!mddev->raid_disks) {
6723 int err;
6724 /* expecting a device which has a superblock */
6725 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6726 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006727 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006728 PTR_ERR(rdev));
6729 return PTR_ERR(rdev);
6730 }
6731 if (!list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11006732 struct md_rdev *rdev0
6733 = list_entry(mddev->disks.next,
6734 struct md_rdev, same_set);
NeilBrowna9f326e2009-09-23 18:06:41 +10006735 err = super_types[mddev->major_version]
Linus Torvalds1da177e2005-04-16 15:20:36 -07006736 .load_super(rdev, rdev0, mddev->minor_version);
6737 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006738 pr_warn("md: %s has different UUID to %s\n",
NeilBrownf72ffdd2014-09-30 14:23:59 +10006739 bdevname(rdev->bdev,b),
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740 bdevname(rdev0->bdev,b2));
6741 export_rdev(rdev);
6742 return -EINVAL;
6743 }
6744 }
6745 err = bind_rdev_to_array(rdev, mddev);
6746 if (err)
6747 export_rdev(rdev);
6748 return err;
6749 }
6750
6751 /*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006752 * md_add_new_disk can be used once the array is assembled
Linus Torvalds1da177e2005-04-16 15:20:36 -07006753 * to add "hot spares". They must already have a superblock
6754 * written
6755 */
6756 if (mddev->pers) {
6757 int err;
6758 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006759 pr_warn("%s: personality does not support diskops!\n",
6760 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761 return -EINVAL;
6762 }
NeilBrown7b1e35f2005-09-09 16:23:50 -07006763 if (mddev->persistent)
6764 rdev = md_import_device(dev, mddev->major_version,
6765 mddev->minor_version);
6766 else
6767 rdev = md_import_device(dev, -1, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006768 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006769 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006770 PTR_ERR(rdev));
6771 return PTR_ERR(rdev);
6772 }
NeilBrown1a855a02010-12-09 16:36:28 +11006773 /* set saved_raid_disk if appropriate */
NeilBrown41158c72005-06-21 17:17:25 -07006774 if (!mddev->persistent) {
6775 if (info->state & (1<<MD_DISK_SYNC) &&
NeilBrownbf572542011-01-12 09:03:35 +11006776 info->raid_disk < mddev->raid_disks) {
NeilBrown41158c72005-06-21 17:17:25 -07006777 rdev->raid_disk = info->raid_disk;
NeilBrownbf572542011-01-12 09:03:35 +11006778 set_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11006779 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownbf572542011-01-12 09:03:35 +11006780 } else
NeilBrown41158c72005-06-21 17:17:25 -07006781 rdev->raid_disk = -1;
NeilBrownf4667222013-12-09 12:04:56 +11006782 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrown41158c72005-06-21 17:17:25 -07006783 } else
6784 super_types[mddev->major_version].
6785 validate_super(mddev, rdev);
NeilBrownbedd86b2011-05-11 14:26:20 +10006786 if ((info->state & (1<<MD_DISK_SYNC)) &&
NeilBrownf4563092012-07-03 15:59:06 +10006787 rdev->raid_disk != info->raid_disk) {
NeilBrownbedd86b2011-05-11 14:26:20 +10006788 /* This was a hot-add request, but events doesn't
6789 * match, so reject it.
6790 */
6791 export_rdev(rdev);
6792 return -EINVAL;
6793 }
6794
NeilBrownb2d444d2005-11-08 21:39:31 -08006795 clear_bit(In_sync, &rdev->flags); /* just to be sure */
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006796 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6797 set_bit(WriteMostly, &rdev->flags);
NeilBrown575a80f2009-03-31 14:33:13 +11006798 else
6799 clear_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006800 if (info->state & (1<<MD_DISK_FAILFAST))
6801 set_bit(FailFast, &rdev->flags);
6802 else
6803 clear_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006804
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006805 if (info->state & (1<<MD_DISK_JOURNAL)) {
6806 struct md_rdev *rdev2;
6807 bool has_journal = false;
6808
6809 /* make sure no existing journal disk */
6810 rdev_for_each(rdev2, mddev) {
6811 if (test_bit(Journal, &rdev2->flags)) {
6812 has_journal = true;
6813 break;
6814 }
6815 }
NeilBrown230b55f2017-10-17 14:24:09 +11006816 if (has_journal || mddev->bitmap) {
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006817 export_rdev(rdev);
6818 return -EBUSY;
6819 }
Song Liubac624f2015-08-13 14:31:55 -07006820 set_bit(Journal, &rdev->flags);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006821 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006822 /*
6823 * check whether the device shows up in other nodes
6824 */
6825 if (mddev_is_clustered(mddev)) {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006826 if (info->state & (1 << MD_DISK_CANDIDATE))
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006827 set_bit(Candidate, &rdev->flags);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006828 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006829 /* --add initiated by this node */
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006830 err = md_cluster_ops->add_new_disk(mddev, rdev);
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006831 if (err) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006832 export_rdev(rdev);
6833 return err;
6834 }
6835 }
6836 }
6837
Linus Torvalds1da177e2005-04-16 15:20:36 -07006838 rdev->raid_disk = -1;
6839 err = bind_rdev_to_array(rdev, mddev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006840
Linus Torvalds1da177e2005-04-16 15:20:36 -07006841 if (err)
6842 export_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006843
6844 if (mddev_is_clustered(mddev)) {
Guoqing Jiange566aef2016-08-12 13:42:34 +08006845 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6846 if (!err) {
6847 err = md_cluster_ops->new_disk_ack(mddev,
6848 err == 0);
6849 if (err)
6850 md_kick_rdev_from_array(rdev);
6851 }
6852 } else {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006853 if (err)
6854 md_cluster_ops->add_new_disk_cancel(mddev);
6855 else
6856 err = add_bound_rdev(rdev);
6857 }
6858
6859 } else if (!err)
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05006860 err = add_bound_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006861
Linus Torvalds1da177e2005-04-16 15:20:36 -07006862 return err;
6863 }
6864
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006865 /* otherwise, md_add_new_disk is only allowed
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866 * for major_version==0 superblocks
6867 */
6868 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006869 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006870 return -EINVAL;
6871 }
6872
6873 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6874 int err;
NeilBrownd710e132008-10-13 11:55:12 +11006875 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006876 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006877 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006878 PTR_ERR(rdev));
6879 return PTR_ERR(rdev);
6880 }
6881 rdev->desc_nr = info->number;
6882 if (info->raid_disk < mddev->raid_disks)
6883 rdev->raid_disk = info->raid_disk;
6884 else
6885 rdev->raid_disk = -1;
6886
Linus Torvalds1da177e2005-04-16 15:20:36 -07006887 if (rdev->raid_disk < mddev->raid_disks)
NeilBrownb2d444d2005-11-08 21:39:31 -08006888 if (info->state & (1<<MD_DISK_SYNC))
6889 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006890
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006891 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6892 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006893 if (info->state & (1<<MD_DISK_FAILFAST))
6894 set_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006895
Linus Torvalds1da177e2005-04-16 15:20:36 -07006896 if (!mddev->persistent) {
NeilBrown9d487392016-11-02 14:16:49 +11006897 pr_debug("md: nonpersistent superblock ...\n");
Mike Snitzer77304d22010-11-08 14:39:12 +01006898 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6899 } else
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006900 rdev->sb_start = calc_dev_sboffset(rdev);
NeilBrown8190e752009-06-18 08:48:58 +10006901 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006902
NeilBrown2bf071b2006-01-06 00:20:55 -08006903 err = bind_rdev_to_array(rdev, mddev);
6904 if (err) {
6905 export_rdev(rdev);
6906 return err;
6907 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006908 }
6909
6910 return 0;
6911}
6912
NeilBrownf72ffdd2014-09-30 14:23:59 +10006913static int hot_remove_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914{
6915 char b[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006916 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006917
Yufen Yuc42a0e22018-05-04 18:08:10 +08006918 if (!mddev->pers)
6919 return -ENODEV;
6920
Linus Torvalds1da177e2005-04-16 15:20:36 -07006921 rdev = find_rdev(mddev, dev);
6922 if (!rdev)
6923 return -ENXIO;
6924
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006925 if (rdev->raid_disk < 0)
6926 goto kick_rdev;
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006927
NeilBrown3ea8929d2013-04-24 11:42:41 +10006928 clear_bit(Blocked, &rdev->flags);
6929 remove_and_add_spares(mddev, rdev);
6930
Linus Torvalds1da177e2005-04-16 15:20:36 -07006931 if (rdev->raid_disk >= 0)
6932 goto busy;
6933
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006934kick_rdev:
Zhao Hemingbca5b062020-11-19 19:41:34 +08006935 if (mddev_is_clustered(mddev)) {
6936 if (md_cluster_ops->remove_disk(mddev, rdev))
6937 goto busy;
6938 }
Goldwyn Rodrigues88bcfef2015-04-14 10:44:44 -05006939
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05006940 md_kick_rdev_from_array(rdev);
Shaohua Li29530792016-12-08 15:48:19 -08006941 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11006942 if (mddev->thread)
6943 md_wakeup_thread(mddev->thread);
6944 else
6945 md_update_sb(mddev, 1);
NeilBrownd7603b72006-01-06 00:20:30 -08006946 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006947
6948 return 0;
6949busy:
NeilBrown9d487392016-11-02 14:16:49 +11006950 pr_debug("md: cannot remove active disk %s from %s ...\n",
6951 bdevname(rdev->bdev,b), mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006952 return -EBUSY;
6953}
6954
NeilBrownf72ffdd2014-09-30 14:23:59 +10006955static int hot_add_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006956{
6957 char b[BDEVNAME_SIZE];
6958 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11006959 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006960
6961 if (!mddev->pers)
6962 return -ENODEV;
6963
6964 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006965 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006966 mdname(mddev));
6967 return -EINVAL;
6968 }
6969 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006970 pr_warn("%s: personality does not support diskops!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006971 mdname(mddev));
6972 return -EINVAL;
6973 }
6974
NeilBrownd710e132008-10-13 11:55:12 +11006975 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006976 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006977 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006978 PTR_ERR(rdev));
6979 return -EINVAL;
6980 }
6981
6982 if (mddev->persistent)
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006983 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006984 else
Mike Snitzer77304d22010-11-08 14:39:12 +01006985 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006986
NeilBrown8190e752009-06-18 08:48:58 +10006987 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006988
NeilBrownb2d444d2005-11-08 21:39:31 -08006989 if (test_bit(Faulty, &rdev->flags)) {
NeilBrown9d487392016-11-02 14:16:49 +11006990 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006991 bdevname(rdev->bdev,b), mdname(mddev));
6992 err = -EINVAL;
6993 goto abort_export;
6994 }
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006995
NeilBrownb2d444d2005-11-08 21:39:31 -08006996 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006997 rdev->desc_nr = -1;
NeilBrown58427302006-10-06 00:44:04 -07006998 rdev->saved_raid_disk = -1;
NeilBrown2bf071b2006-01-06 00:20:55 -08006999 err = bind_rdev_to_array(rdev, mddev);
7000 if (err)
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05007001 goto abort_export;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007002
7003 /*
 7004	 * The rest had better be atomic; we can have disk failures
7005 * noticed in interrupt contexts ...
7006 */
7007
Linus Torvalds1da177e2005-04-16 15:20:36 -07007008 rdev->raid_disk = -1;
7009
Shaohua Li29530792016-12-08 15:48:19 -08007010 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11007011 if (!mddev->thread)
7012 md_update_sb(mddev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007013 /*
7014 * Kick recovery, maybe this spare has to be added to the
7015 * array immediately.
7016 */
7017 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7018 md_wakeup_thread(mddev->thread);
NeilBrownd7603b72006-01-06 00:20:30 -08007019 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020 return 0;
7021
Linus Torvalds1da177e2005-04-16 15:20:36 -07007022abort_export:
7023 export_rdev(rdev);
7024 return err;
7025}
7026
NeilBrownfd01b882011-10-11 16:47:53 +11007027static int set_bitmap_file(struct mddev *mddev, int fd)
NeilBrown32a76272005-06-21 17:17:14 -07007028{
NeilBrown035328c2014-04-09 12:25:40 +10007029 int err = 0;
NeilBrown32a76272005-06-21 17:17:14 -07007030
NeilBrown36fa3062005-09-09 16:23:45 -07007031 if (mddev->pers) {
NeilBrownd66b1b32014-08-08 15:40:24 +10007032 if (!mddev->pers->quiesce || !mddev->thread)
NeilBrown36fa3062005-09-09 16:23:45 -07007033 return -EBUSY;
7034 if (mddev->recovery || mddev->sync_thread)
7035 return -EBUSY;
7036 /* we should be able to change the bitmap.. */
NeilBrown32a76272005-06-21 17:17:14 -07007037 }
7038
NeilBrown36fa3062005-09-09 16:23:45 -07007039 if (fd >= 0) {
NeilBrown035328c2014-04-09 12:25:40 +10007040 struct inode *inode;
NeilBrown1e594bb2014-12-15 12:57:00 +11007041 struct file *f;
NeilBrown36fa3062005-09-09 16:23:45 -07007042
NeilBrown1e594bb2014-12-15 12:57:00 +11007043 if (mddev->bitmap || mddev->bitmap_info.file)
7044 return -EEXIST; /* cannot add when bitmap is present */
7045 f = fget(fd);
7046
7047 if (f == NULL) {
NeilBrown9d487392016-11-02 14:16:49 +11007048 pr_warn("%s: error: failed to get bitmap file\n",
7049 mdname(mddev));
NeilBrown36fa3062005-09-09 16:23:45 -07007050 return -EBADF;
7051 }
7052
NeilBrown1e594bb2014-12-15 12:57:00 +11007053 inode = f->f_mapping->host;
NeilBrown035328c2014-04-09 12:25:40 +10007054 if (!S_ISREG(inode->i_mode)) {
NeilBrown9d487392016-11-02 14:16:49 +11007055 pr_warn("%s: error: bitmap file must be a regular file\n",
7056 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007057 err = -EBADF;
NeilBrown1e594bb2014-12-15 12:57:00 +11007058 } else if (!(f->f_mode & FMODE_WRITE)) {
NeilBrown9d487392016-11-02 14:16:49 +11007059		pr_warn("%s: error: bitmap file must be opened for write\n",
7060 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007061 err = -EBADF;
7062 } else if (atomic_read(&inode->i_writecount) != 1) {
NeilBrown9d487392016-11-02 14:16:49 +11007063 pr_warn("%s: error: bitmap file is already in use\n",
7064 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007065 err = -EBUSY;
7066 }
7067 if (err) {
NeilBrown1e594bb2014-12-15 12:57:00 +11007068 fput(f);
NeilBrown36fa3062005-09-09 16:23:45 -07007069 return err;
7070 }
NeilBrown1e594bb2014-12-15 12:57:00 +11007071 mddev->bitmap_info.file = f;
NeilBrownc3d97142009-12-14 12:49:52 +11007072 mddev->bitmap_info.offset = 0; /* file overrides offset */
NeilBrown36fa3062005-09-09 16:23:45 -07007073 } else if (mddev->bitmap == NULL)
7074 return -ENOENT; /* cannot remove what isn't there */
7075 err = 0;
7076 if (mddev->pers) {
NeilBrown69e51b42010-06-01 19:37:35 +10007077 if (fd >= 0) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007078 struct bitmap *bitmap;
7079
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007080 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007081 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007082 if (!IS_ERR(bitmap)) {
7083 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007084 err = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007085 } else
7086 err = PTR_ERR(bitmap);
NeilBrown52a0d492017-10-17 13:46:43 +11007087 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007088 md_bitmap_destroy(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007089 fd = -1;
7090 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007091 mddev_resume(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007092 } else if (fd < 0) {
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007093 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007094 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007095 mddev_resume(mddev);
NeilBrownd7375ab2006-06-26 00:27:43 -07007096 }
NeilBrownd7375ab2006-06-26 00:27:43 -07007097 }
7098 if (fd < 0) {
NeilBrown4af1a042014-12-15 12:57:00 +11007099 struct file *f = mddev->bitmap_info.file;
7100 if (f) {
7101 spin_lock(&mddev->lock);
7102 mddev->bitmap_info.file = NULL;
7103 spin_unlock(&mddev->lock);
7104 fput(f);
7105 }
NeilBrown36fa3062005-09-09 16:23:45 -07007106 }
7107
NeilBrown32a76272005-06-21 17:17:14 -07007108 return err;
7109}
7110
Linus Torvalds1da177e2005-04-16 15:20:36 -07007111/*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007112 * md_set_array_info is used in two different ways.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007113 * The original usage is when creating a new array.
7114 * In this usage, raid_disks is > 0 and it together with
 7115 * level, size, not_persistent, layout, chunksize determine the
7116 * shape of the array.
7117 * This will always create an array with a type-0.90.0 superblock.
7118 * The newer usage is when assembling an array.
7119 * In this case raid_disks will be 0, and the major_version field is
 7120 * used to determine which style super-blocks are to be found on the devices.
 7121 * The minor and patch _version numbers are also kept in case the
7122 * super_block handler wishes to interpret them.
7123 */
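/*
 * A rough sketch of the two call styles (the concrete values below are
 * illustrative only, not taken from any particular caller):
 *
 *   creating:   info.raid_disks = 4; info.level = 5;
 *               info.size = <per-device size in KB>;
 *               info.chunk_size = 64 * 1024;
 *               info.not_persistent = 0;
 *
 *   assembling: info.raid_disks = 0;
 *               info.major_version = 1; info.minor_version = 2;
 *
 * In the second form everything else is read back from the on-disk
 * superblocks once the member devices are added.
 */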
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007124int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007125{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007126 if (info->raid_disks == 0) {
7127 /* just setting version number for superblock loading */
7128 if (info->major_version < 0 ||
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07007129 info->major_version >= ARRAY_SIZE(super_types) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130 super_types[info->major_version].name == NULL) {
7131 /* maybe try to auto-load a module? */
NeilBrown9d487392016-11-02 14:16:49 +11007132 pr_warn("md: superblock version %d not known\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007133 info->major_version);
7134 return -EINVAL;
7135 }
7136 mddev->major_version = info->major_version;
7137 mddev->minor_version = info->minor_version;
7138 mddev->patch_version = info->patch_version;
NeilBrown3f9d7b02006-12-22 01:11:41 -08007139 mddev->persistent = !info->not_persistent;
NeilBrowncbd19982009-12-30 12:08:49 +11007140 /* ensure mddev_put doesn't delete this now that there
7141 * is some minimal configuration.
7142 */
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007143 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007144 return 0;
7145 }
7146 mddev->major_version = MD_MAJOR_VERSION;
7147 mddev->minor_version = MD_MINOR_VERSION;
7148 mddev->patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007149 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150
7151 mddev->level = info->level;
NeilBrown17115e02006-01-16 22:14:57 -08007152 mddev->clevel[0] = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11007153 mddev->dev_sectors = 2 * (sector_t)info->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007154 mddev->raid_disks = info->raid_disks;
7155 /* don't set md_minor, it is determined by which /dev/md* was
 7156	 * opened
7157 */
7158 if (info->state & (1<<MD_SB_CLEAN))
7159 mddev->recovery_cp = MaxSector;
7160 else
7161 mddev->recovery_cp = 0;
7162 mddev->persistent = ! info->not_persistent;
NeilBrowne6910632008-02-06 01:39:51 -08007163 mddev->external = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007164
7165 mddev->layout = info->layout;
NeilBrown33f2c352019-09-09 16:52:29 +10007166 if (mddev->level == 0)
7167 /* Cannot trust RAID0 layout info here */
7168 mddev->layout = -1;
Andre Noll9d8f0362009-06-18 08:45:01 +10007169 mddev->chunk_sectors = info->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170
Shaohua Li29530792016-12-08 15:48:19 -08007171 if (mddev->persistent) {
NeilBrown1b3bae42017-03-01 07:31:28 +11007172 mddev->max_disks = MD_SB_DISKS;
7173 mddev->flags = 0;
7174 mddev->sb_flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08007175 }
7176 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007177
NeilBrownc3d97142009-12-14 12:49:52 +11007178 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10007179 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrownc3d97142009-12-14 12:49:52 +11007180 mddev->bitmap_info.offset = 0;
NeilBrownb2a27032005-11-28 13:44:12 -08007181
NeilBrownf6705572006-03-27 01:18:11 -08007182 mddev->reshape_position = MaxSector;
7183
Linus Torvalds1da177e2005-04-16 15:20:36 -07007184 /*
7185 * Generate a 128 bit UUID
7186 */
7187 get_random_bytes(mddev->uuid, 16);
7188
NeilBrownf6705572006-03-27 01:18:11 -08007189 mddev->new_level = mddev->level;
Andre Noll664e7c42009-06-18 08:45:27 +10007190 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08007191 mddev->new_layout = mddev->layout;
7192 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007193 mddev->reshape_backwards = 0;
NeilBrownf6705572006-03-27 01:18:11 -08007194
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195 return 0;
7196}
7197
NeilBrownfd01b882011-10-11 16:47:53 +11007198void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
Dan Williams1f403622009-03-31 14:59:03 +11007199{
Shaohua Liefa4b772017-10-18 22:08:13 -07007200 lockdep_assert_held(&mddev->reconfig_mutex);
Dan Williamsb522adc2009-03-31 15:00:31 +11007201
7202 if (mddev->external_size)
7203 return;
7204
Dan Williams1f403622009-03-31 14:59:03 +11007205 mddev->array_sectors = array_sectors;
7206}
7207EXPORT_SYMBOL(md_set_array_sectors);
7208
NeilBrownfd01b882011-10-11 16:47:53 +11007209static int update_size(struct mddev *mddev, sector_t num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007210{
NeilBrown3cb03002011-10-11 16:45:26 +11007211 struct md_rdev *rdev;
NeilBrowna35b0d62006-01-06 00:20:49 -08007212 int rv;
Andre Nolld71f9f82008-07-11 22:02:22 +10007213 int fit = (num_sectors == 0);
Guoqing Jiang818da592017-03-01 16:42:40 +08007214 sector_t old_dev_sectors = mddev->dev_sectors;
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04007215
NeilBrowna35b0d62006-01-06 00:20:49 -08007216 if (mddev->pers->resize == NULL)
7217 return -EINVAL;
Andre Nolld71f9f82008-07-11 22:02:22 +10007218 /* The "num_sectors" is the number of sectors of each device that
7219 * is used. This can only make sense for arrays with redundancy.
7220 * linear and raid0 always use whatever space is available. We can only
7221 * consider changing this number if no resync or reconstruction is
7222 * happening, and if the new size is acceptable. It must fit before the
Andre Noll0f420352008-07-11 22:02:23 +10007223 * sb_start or, if that is <data_offset, it must fit before the size
Andre Nolld71f9f82008-07-11 22:02:22 +10007224 * of each device. If num_sectors is zero, we find the largest size
7225 * that fits.
NeilBrowna35b0d62006-01-06 00:20:49 -08007226 */
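	/*
	 * For instance (illustration, not an exhaustive description):
	 * num_sectors == 0 is how a "grow each member to the largest size
	 * that still fits" request is expressed; any non-zero value must
	 * fit on every rdev or -ENOSPC is returned below.
	 */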
NeilBrownf851b602014-12-11 10:02:10 +11007227 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7228 mddev->sync_thread)
NeilBrowna35b0d62006-01-06 00:20:49 -08007229 return -EBUSY;
NeilBrownbd8839e2014-05-28 13:39:21 +10007230 if (mddev->ro)
7231 return -EROFS;
NeilBrowna4a61252012-05-22 13:55:27 +10007232
NeilBrowndafb20f2012-03-19 12:46:39 +11007233 rdev_for_each(rdev, mddev) {
Andre Nolldd8ac332009-03-31 14:33:13 +11007234 sector_t avail = rdev->sectors;
NeilBrown01ab5662006-10-28 10:38:30 -07007235
Andre Nolld71f9f82008-07-11 22:02:22 +10007236 if (fit && (num_sectors == 0 || num_sectors > avail))
7237 num_sectors = avail;
7238 if (avail < num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007239 return -ENOSPC;
7240 }
Andre Nolld71f9f82008-07-11 22:02:22 +10007241 rv = mddev->pers->resize(mddev, num_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007242 if (!rv) {
Guoqing Jiang818da592017-03-01 16:42:40 +08007243 if (mddev_is_clustered(mddev))
7244 md_cluster_ops->update_size(mddev, old_dev_sectors);
7245 else if (mddev->queue) {
Christoph Hellwig2c247c52020-11-16 15:57:11 +01007246 set_capacity_and_notify(mddev->gendisk,
7247 mddev->array_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007248 }
7249 }
NeilBrowna35b0d62006-01-06 00:20:49 -08007250 return rv;
7251}
7252
NeilBrownfd01b882011-10-11 16:47:53 +11007253static int update_raid_disks(struct mddev *mddev, int raid_disks)
NeilBrownda943b992006-01-06 00:20:54 -08007254{
7255 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10007256 struct md_rdev *rdev;
NeilBrownda943b992006-01-06 00:20:54 -08007257 /* change the number of raid disks */
NeilBrown63c70c42006-03-27 01:18:13 -08007258 if (mddev->pers->check_reshape == NULL)
NeilBrownda943b992006-01-06 00:20:54 -08007259 return -EINVAL;
NeilBrownbd8839e2014-05-28 13:39:21 +10007260 if (mddev->ro)
7261 return -EROFS;
NeilBrownda943b992006-01-06 00:20:54 -08007262 if (raid_disks <= 0 ||
NeilBrown233fca32010-04-14 17:02:09 +10007263 (mddev->max_disks && raid_disks >= mddev->max_disks))
NeilBrownda943b992006-01-06 00:20:54 -08007264 return -EINVAL;
NeilBrownf851b602014-12-11 10:02:10 +11007265 if (mddev->sync_thread ||
7266 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
Zhao Heminga8da01f2020-11-19 19:41:33 +08007267 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
NeilBrownf851b602014-12-11 10:02:10 +11007268 mddev->reshape_position != MaxSector)
NeilBrownda943b992006-01-06 00:20:54 -08007269 return -EBUSY;
NeilBrownc6563a82012-05-21 09:27:00 +10007270
7271 rdev_for_each(rdev, mddev) {
7272 if (mddev->raid_disks < raid_disks &&
7273 rdev->data_offset < rdev->new_data_offset)
7274 return -EINVAL;
7275 if (mddev->raid_disks > raid_disks &&
7276 rdev->data_offset > rdev->new_data_offset)
7277 return -EINVAL;
7278 }
7279
NeilBrown63c70c42006-03-27 01:18:13 -08007280 mddev->delta_disks = raid_disks - mddev->raid_disks;
NeilBrown2c810cd2012-05-21 09:27:00 +10007281 if (mddev->delta_disks < 0)
7282 mddev->reshape_backwards = 1;
7283 else if (mddev->delta_disks > 0)
7284 mddev->reshape_backwards = 0;
NeilBrown63c70c42006-03-27 01:18:13 -08007285
7286 rv = mddev->pers->check_reshape(mddev);
NeilBrown2c810cd2012-05-21 09:27:00 +10007287 if (rv < 0) {
NeilBrownde171cb2011-01-31 11:57:42 +11007288 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007289 mddev->reshape_backwards = 0;
7290 }
NeilBrownda943b992006-01-06 00:20:54 -08007291 return rv;
7292}
7293
Linus Torvalds1da177e2005-04-16 15:20:36 -07007294/*
7295 * update_array_info is used to change the configuration of an
7296 * on-line array.
 7297 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
7298 * fields in the info are checked against the array.
7299 * Any differences that cannot be handled will cause an error.
7300 * Normally, only one change can be managed at a time.
7301 */
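/*
 * For example (sketch only): a single call that both grows raid_disks
 * and changes the layout trips the "cnt > 1" check below and is
 * rejected with -EINVAL; the caller has to make two separate requests.
 */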
NeilBrownfd01b882011-10-11 16:47:53 +11007302static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007303{
7304 int rv = 0;
7305 int cnt = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007306 int state = 0;
7307
 7308	/* calculate expected state, ignoring low bits */
NeilBrownc3d97142009-12-14 12:49:52 +11007309 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown36fa3062005-09-09 16:23:45 -07007310 state |= (1 << MD_SB_BITMAP_PRESENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007311
7312 if (mddev->major_version != info->major_version ||
7313 mddev->minor_version != info->minor_version ||
7314/* mddev->patch_version != info->patch_version || */
7315 mddev->ctime != info->ctime ||
7316 mddev->level != info->level ||
7317/* mddev->layout != info->layout || */
Firo Yang4e023612015-06-11 09:41:10 +08007318 mddev->persistent != !info->not_persistent ||
Andre Noll9d8f0362009-06-18 08:45:01 +10007319 mddev->chunk_sectors != info->chunk_size >> 9 ||
NeilBrown36fa3062005-09-09 16:23:45 -07007320 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7321 ((state^info->state) & 0xfffffe00)
7322 )
Linus Torvalds1da177e2005-04-16 15:20:36 -07007323 return -EINVAL;
7324 /* Check there is only one change */
Andre Noll58c0fed2009-03-31 14:33:13 +11007325 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7326 cnt++;
7327 if (mddev->raid_disks != info->raid_disks)
7328 cnt++;
7329 if (mddev->layout != info->layout)
7330 cnt++;
7331 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7332 cnt++;
7333 if (cnt == 0)
7334 return 0;
7335 if (cnt > 1)
7336 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007337
7338 if (mddev->layout != info->layout) {
7339 /* Change layout
7340 * we don't need to do anything at the md level, the
7341 * personality will take care of it all.
7342 */
NeilBrown50ac1682009-06-18 08:47:55 +10007343 if (mddev->pers->check_reshape == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007344 return -EINVAL;
NeilBrown597a7112009-06-18 08:47:42 +10007345 else {
7346 mddev->new_layout = info->layout;
NeilBrown50ac1682009-06-18 08:47:55 +10007347 rv = mddev->pers->check_reshape(mddev);
NeilBrown597a7112009-06-18 08:47:42 +10007348 if (rv)
7349 mddev->new_layout = mddev->layout;
7350 return rv;
7351 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007352 }
Andre Noll58c0fed2009-03-31 14:33:13 +11007353 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
Andre Nolld71f9f82008-07-11 22:02:22 +10007354 rv = update_size(mddev, (sector_t)info->size * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007355
NeilBrownda943b992006-01-06 00:20:54 -08007356 if (mddev->raid_disks != info->raid_disks)
7357 rv = update_raid_disks(mddev, info->raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007358
NeilBrown36fa3062005-09-09 16:23:45 -07007359 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007360 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7361 rv = -EINVAL;
7362 goto err;
7363 }
7364 if (mddev->recovery || mddev->sync_thread) {
7365 rv = -EBUSY;
7366 goto err;
7367 }
NeilBrown36fa3062005-09-09 16:23:45 -07007368 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007369 struct bitmap *bitmap;
NeilBrown36fa3062005-09-09 16:23:45 -07007370 /* add the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007371 if (mddev->bitmap) {
7372 rv = -EEXIST;
7373 goto err;
7374 }
7375 if (mddev->bitmap_info.default_offset == 0) {
7376 rv = -EINVAL;
7377 goto err;
7378 }
NeilBrownc3d97142009-12-14 12:49:52 +11007379 mddev->bitmap_info.offset =
7380 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10007381 mddev->bitmap_info.space =
7382 mddev->bitmap_info.default_space;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007383 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007384 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007385 if (!IS_ERR(bitmap)) {
7386 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007387 rv = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007388 } else
7389 rv = PTR_ERR(bitmap);
NeilBrown36fa3062005-09-09 16:23:45 -07007390 if (rv)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007391 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007392 mddev_resume(mddev);
NeilBrown36fa3062005-09-09 16:23:45 -07007393 } else {
7394 /* remove the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007395 if (!mddev->bitmap) {
7396 rv = -ENOENT;
7397 goto err;
7398 }
7399 if (mddev->bitmap->storage.file) {
7400 rv = -EINVAL;
7401 goto err;
7402 }
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007403 if (mddev->bitmap_info.nodes) {
 7404			/* hold PW on all the bitmap locks */
7405 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11007406 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007407 rv = -EPERM;
7408 md_cluster_ops->unlock_all_bitmaps(mddev);
7409 goto err;
7410 }
7411
7412 mddev->bitmap_info.nodes = 0;
7413 md_cluster_ops->leave(mddev);
Zhao Hemingedee9df2020-07-21 02:08:53 +08007414 module_put(md_cluster_mod);
Zhao Heming7c9d5c52020-07-21 02:08:52 +08007415 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007416 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007417 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007418 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007419 mddev_resume(mddev);
NeilBrownc3d97142009-12-14 12:49:52 +11007420 mddev->bitmap_info.offset = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007421 }
7422 }
NeilBrown850b2b422006-10-03 01:15:46 -07007423 md_update_sb(mddev, 1);
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007424 return rv;
7425err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007426 return rv;
7427}
7428
NeilBrownfd01b882011-10-11 16:47:53 +11007429static int set_disk_faulty(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007430{
NeilBrown3cb03002011-10-11 16:45:26 +11007431 struct md_rdev *rdev;
NeilBrown1ca69c42012-10-11 13:37:33 +11007432 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007433
7434 if (mddev->pers == NULL)
7435 return -ENODEV;
7436
NeilBrown1ca69c42012-10-11 13:37:33 +11007437 rcu_read_lock();
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +01007438 rdev = md_find_rdev_rcu(mddev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007439 if (!rdev)
NeilBrown1ca69c42012-10-11 13:37:33 +11007440 err = -ENODEV;
7441 else {
7442 md_error(mddev, rdev);
7443 if (!test_bit(Faulty, &rdev->flags))
7444 err = -EBUSY;
7445 }
7446 rcu_read_unlock();
7447 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007448}
7449
Andre Noll2f9618c2008-04-25 18:57:58 +02007450/*
 7451 * We have a problem here: there is no easy way to give a CHS
 7452 * virtual geometry. We currently pretend that we have 2 heads,
 7453 * 4 sectors (with a BIG number of cylinders...). This drives
7454 * dosfs just mad... ;-)
7455 */
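/*
 * Worked example (numbers purely for illustration): an array of
 * 2147483648 512-byte sectors (1 TiB) is reported as 2 heads,
 * 4 sectors/track and 2147483648 / 8 = 268435456 cylinders.
 */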
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007456static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7457{
NeilBrownfd01b882011-10-11 16:47:53 +11007458 struct mddev *mddev = bdev->bd_disk->private_data;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007459
7460 geo->heads = 2;
7461 geo->sectors = 4;
NeilBrown49ce6ce2010-03-29 10:51:42 +11007462 geo->cylinders = mddev->array_sectors / 8;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007463 return 0;
7464}
7465
Nicolas Schichancb335f82014-01-15 16:58:52 +01007466static inline bool md_ioctl_valid(unsigned int cmd)
7467{
7468 switch (cmd) {
7469 case ADD_NEW_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007470 case GET_ARRAY_INFO:
7471 case GET_BITMAP_FILE:
7472 case GET_DISK_INFO:
7473 case HOT_ADD_DISK:
7474 case HOT_REMOVE_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007475 case RAID_VERSION:
7476 case RESTART_ARRAY_RW:
7477 case RUN_ARRAY:
7478 case SET_ARRAY_INFO:
7479 case SET_BITMAP_FILE:
7480 case SET_DISK_FAULTY:
7481 case STOP_ARRAY:
7482 case STOP_ARRAY_RO:
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007483 case CLUSTERED_DISK_NACK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007484 return true;
7485 default:
7486 return false;
7487 }
7488}
7489
Al Viroa39907f2008-03-02 10:31:15 -05007490static int md_ioctl(struct block_device *bdev, fmode_t mode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007491 unsigned int cmd, unsigned long arg)
7492{
7493 int err = 0;
7494 void __user *argp = (void __user *)arg;
NeilBrownfd01b882011-10-11 16:47:53 +11007495 struct mddev *mddev = NULL;
NeilBrown065e5192017-04-06 11:16:33 +08007496 bool did_set_md_closing = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007497
Nicolas Schichancb335f82014-01-15 16:58:52 +01007498 if (!md_ioctl_valid(cmd))
7499 return -ENOTTY;
7500
NeilBrown506c9e42011-12-23 10:17:26 +11007501 switch (cmd) {
7502 case RAID_VERSION:
7503 case GET_ARRAY_INFO:
7504 case GET_DISK_INFO:
7505 break;
7506 default:
7507 if (!capable(CAP_SYS_ADMIN))
7508 return -EACCES;
7509 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007510
7511 /*
7512 * Commands dealing with the RAID driver but not any
7513 * particular array:
7514 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007515 switch (cmd) {
7516 case RAID_VERSION:
7517 err = get_version(argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007518 goto out;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007519 default:;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007520 }
7521
7522 /*
7523 * Commands creating/starting a new array:
7524 */
7525
Al Viroa39907f2008-03-02 10:31:15 -05007526 mddev = bdev->bd_disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527
7528 if (!mddev) {
7529 BUG();
NeilBrown3adc28d2014-09-30 15:46:41 +10007530 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007531 }
7532
NeilBrown1ca69c42012-10-11 13:37:33 +11007533	/* Some actions do not require the mutex */
7534 switch (cmd) {
7535 case GET_ARRAY_INFO:
7536 if (!mddev->raid_disks && !mddev->external)
7537 err = -ENODEV;
7538 else
7539 err = get_array_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007540 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007541
7542 case GET_DISK_INFO:
7543 if (!mddev->raid_disks && !mddev->external)
7544 err = -ENODEV;
7545 else
7546 err = get_disk_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007547 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007548
7549 case SET_DISK_FAULTY:
7550 err = set_disk_faulty(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007551 goto out;
NeilBrown4af1a042014-12-15 12:57:00 +11007552
7553 case GET_BITMAP_FILE:
7554 err = get_bitmap_file(mddev, argp);
7555 goto out;
7556
NeilBrown1ca69c42012-10-11 13:37:33 +11007557 }
7558
Guoqing Jiang78b990c2020-04-04 23:57:10 +02007559 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02007560 flush_rdev_wq(mddev);
NeilBrowna7a3f082012-12-11 13:35:54 +11007561
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007562 if (cmd == HOT_REMOVE_DISK)
7563 /* need to ensure recovery thread has run */
7564 wait_event_interruptible_timeout(mddev->sb_wait,
7565 !test_bit(MD_RECOVERY_NEEDED,
Shaohua Li82a301c2016-12-08 15:48:18 -08007566 &mddev->recovery),
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007567 msecs_to_jiffies(5000));
NeilBrown260fa032013-08-27 16:44:13 +10007568 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7569 /* Need to flush page cache, and ensure no-one else opens
7570 * and writes
7571 */
7572 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10007573 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
NeilBrown260fa032013-08-27 16:44:13 +10007574 mutex_unlock(&mddev->open_mutex);
7575 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007576 goto out;
NeilBrown260fa032013-08-27 16:44:13 +10007577 }
Dae R. Jeongc731b842020-10-22 10:21:28 +09007578 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7579 mutex_unlock(&mddev->open_mutex);
7580 err = -EBUSY;
7581 goto out;
7582 }
NeilBrown065e5192017-04-06 11:16:33 +08007583 did_set_md_closing = true;
NeilBrown260fa032013-08-27 16:44:13 +10007584 mutex_unlock(&mddev->open_mutex);
7585 sync_blockdev(bdev);
7586 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007587 err = mddev_lock(mddev);
7588 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007589 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7590 err, cmd);
NeilBrown3adc28d2014-09-30 15:46:41 +10007591 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007592 }
7593
NeilBrownc02c0ae2012-12-11 13:39:21 +11007594 if (cmd == SET_ARRAY_INFO) {
7595 mdu_array_info_t info;
7596 if (!arg)
7597 memset(&info, 0, sizeof(info));
7598 else if (copy_from_user(&info, argp, sizeof(info))) {
7599 err = -EFAULT;
NeilBrown3adc28d2014-09-30 15:46:41 +10007600 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007601 }
7602 if (mddev->pers) {
7603 err = update_array_info(mddev, &info);
7604 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007605 pr_warn("md: couldn't update array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007606 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007607 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007608 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007609 }
7610 if (!list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11007611 pr_warn("md: array %s already has disks!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007612 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007613 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007614 }
7615 if (mddev->raid_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11007616 pr_warn("md: array %s already initialised!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007617 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007618 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007619 }
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007620 err = md_set_array_info(mddev, &info);
NeilBrownc02c0ae2012-12-11 13:39:21 +11007621 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007622 pr_warn("md: couldn't set array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007623 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007624 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007625 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007626 }
7627
7628 /*
7629 * Commands querying/configuring an existing array:
7630 */
NeilBrown32a76272005-06-21 17:17:14 -07007631 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
NeilBrown3f9d7b02006-12-22 01:11:41 -08007632 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
NeilBrowna17184a2008-02-06 01:39:55 -08007633 if ((!mddev->raid_disks && !mddev->external)
7634 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7635 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7636 && cmd != GET_BITMAP_FILE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007637 err = -ENODEV;
NeilBrown3adc28d2014-09-30 15:46:41 +10007638 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007639 }
7640
7641 /*
7642 * Commands even a read-only array can execute:
7643 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007644 switch (cmd) {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007645 case RESTART_ARRAY_RW:
7646 err = restart_array(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007647 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007648
7649 case STOP_ARRAY:
7650 err = do_md_stop(mddev, 0, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007651 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007652
7653 case STOP_ARRAY_RO:
7654 err = md_set_readonly(mddev, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007655 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007656
NeilBrown3ea8929d2013-04-24 11:42:41 +10007657 case HOT_REMOVE_DISK:
7658 err = hot_remove_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007659 goto unlock;
NeilBrown3ea8929d2013-04-24 11:42:41 +10007660
NeilBrown7ceb17e2013-04-24 11:42:42 +10007661 case ADD_NEW_DISK:
7662 /* We can support ADD_NEW_DISK on read-only arrays
Wei Fang466ad292016-03-21 19:19:30 +08007663 * only if we are re-adding a preexisting device.
NeilBrown7ceb17e2013-04-24 11:42:42 +10007664 * So require mddev->pers and MD_DISK_SYNC.
7665 */
7666 if (mddev->pers) {
7667 mdu_disk_info_t info;
7668 if (copy_from_user(&info, argp, sizeof(info)))
7669 err = -EFAULT;
7670 else if (!(info.state & (1<<MD_DISK_SYNC)))
7671 /* Need to clear read-only for this */
7672 break;
7673 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007674 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007675 goto unlock;
NeilBrown7ceb17e2013-04-24 11:42:42 +10007676 }
7677 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007678 }
7679
7680 /*
7681 * The remaining ioctls are changing the state of the
NeilBrownf91de922005-11-08 21:39:36 -08007682 * superblock, so we do not allow them on read-only arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007683 */
NeilBrown326eb172014-09-30 15:36:28 +10007684 if (mddev->ro && mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08007685 if (mddev->ro == 2) {
7686 mddev->ro = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10007687 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10007688 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf3378b42013-02-28 11:59:03 +11007689 /* mddev_unlock will wake thread */
7690 /* If a device failed while we were read-only, we
7691 * need to make sure the metadata is updated now.
7692 */
Shaohua Li29530792016-12-08 15:48:19 -08007693 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
NeilBrownf3378b42013-02-28 11:59:03 +11007694 mddev_unlock(mddev);
7695 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08007696 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7697 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown29f097c2013-11-14 17:54:51 +11007698 mddev_lock_nointr(mddev);
NeilBrownf3378b42013-02-28 11:59:03 +11007699 }
NeilBrownf91de922005-11-08 21:39:36 -08007700 } else {
7701 err = -EROFS;
NeilBrown3adc28d2014-09-30 15:46:41 +10007702 goto unlock;
NeilBrownf91de922005-11-08 21:39:36 -08007703 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007704 }
7705
NeilBrownc02c0ae2012-12-11 13:39:21 +11007706 switch (cmd) {
7707 case ADD_NEW_DISK:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007708 {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007709 mdu_disk_info_t info;
7710 if (copy_from_user(&info, argp, sizeof(info)))
7711 err = -EFAULT;
7712 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007713 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007714 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007715 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007716
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007717 case CLUSTERED_DISK_NACK:
7718 if (mddev_is_clustered(mddev))
7719 md_cluster_ops->new_disk_ack(mddev, false);
7720 else
7721 err = -EINVAL;
7722 goto unlock;
7723
NeilBrownc02c0ae2012-12-11 13:39:21 +11007724 case HOT_ADD_DISK:
7725 err = hot_add_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007726 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007727
NeilBrownc02c0ae2012-12-11 13:39:21 +11007728 case RUN_ARRAY:
7729 err = do_md_run(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007730 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007731
NeilBrownc02c0ae2012-12-11 13:39:21 +11007732 case SET_BITMAP_FILE:
7733 err = set_bitmap_file(mddev, (int)arg);
NeilBrown3adc28d2014-09-30 15:46:41 +10007734 goto unlock;
NeilBrown32a76272005-06-21 17:17:14 -07007735
NeilBrownc02c0ae2012-12-11 13:39:21 +11007736 default:
7737 err = -EINVAL;
NeilBrown3adc28d2014-09-30 15:46:41 +10007738 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007739 }
7740
NeilBrown3adc28d2014-09-30 15:46:41 +10007741unlock:
NeilBrownd3374822009-01-09 08:31:10 +11007742 if (mddev->hold_active == UNTIL_IOCTL &&
7743 err != -EINVAL)
7744 mddev->hold_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007745 mddev_unlock(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007746out:
NeilBrown065e5192017-04-06 11:16:33 +08007747 if(did_set_md_closing)
7748 clear_bit(MD_CLOSING, &mddev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007749 return err;
7750}
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007751#ifdef CONFIG_COMPAT
7752static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7753 unsigned int cmd, unsigned long arg)
7754{
7755 switch (cmd) {
7756 case HOT_REMOVE_DISK:
7757 case HOT_ADD_DISK:
7758 case SET_DISK_FAULTY:
7759 case SET_BITMAP_FILE:
 7760	/* These take an integer arg, do not convert */
7761 break;
7762 default:
7763 arg = (unsigned long)compat_ptr(arg);
7764 break;
7765 }
7766
7767 return md_ioctl(bdev, mode, cmd, arg);
7768}
7769#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007770
Christoph Hellwig118cf082020-11-03 11:00:13 +01007771static int md_set_read_only(struct block_device *bdev, bool ro)
7772{
7773 struct mddev *mddev = bdev->bd_disk->private_data;
7774 int err;
7775
7776 err = mddev_lock(mddev);
7777 if (err)
7778 return err;
7779
7780 if (!mddev->raid_disks && !mddev->external) {
7781 err = -ENODEV;
7782 goto out_unlock;
7783 }
7784
7785 /*
7786 * Transitioning to read-auto need only happen for arrays that call
7787 * md_write_start and which are not ready for writes yet.
7788 */
7789 if (!ro && mddev->ro == 1 && mddev->pers) {
7790 err = restart_array(mddev);
7791 if (err)
7792 goto out_unlock;
7793 mddev->ro = 2;
7794 }
7795
7796out_unlock:
7797 mddev_unlock(mddev);
7798 return err;
7799}
7800
Al Viroa39907f2008-03-02 10:31:15 -05007801static int md_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007802{
7803 /*
7804 * Succeed if we can lock the mddev, which confirms that
7805 * it isn't being stopped right now.
7806 */
NeilBrownfd01b882011-10-11 16:47:53 +11007807 struct mddev *mddev = mddev_find(bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007808 int err;
7809
Yuanhan Liu0c098222012-05-22 13:55:32 +10007810 if (!mddev)
7811 return -ENODEV;
7812
NeilBrownd3374822009-01-09 08:31:10 +11007813 if (mddev->gendisk != bdev->bd_disk) {
7814 /* we are racing with mddev_put which is discarding this
7815 * bd_disk.
7816 */
7817 mddev_put(mddev);
7818 /* Wait until bdev->bd_disk is definitely gone */
Guoqing Jiangf6766ff2020-04-04 23:57:09 +02007819 if (work_pending(&mddev->del_work))
7820 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11007821 /* Then retry the open from the top */
7822 return -ERESTARTSYS;
7823 }
7824 BUG_ON(mddev != bdev->bd_disk->private_data);
7825
NeilBrownc8c00a62009-08-10 12:50:52 +10007826 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007827 goto out;
7828
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007829 if (test_bit(MD_CLOSING, &mddev->flags)) {
7830 mutex_unlock(&mddev->open_mutex);
NeilBrowne2342ca2016-12-05 16:40:50 +11007831 err = -ENODEV;
7832 goto out;
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007833 }
7834
Linus Torvalds1da177e2005-04-16 15:20:36 -07007835 err = 0;
NeilBrownf2ea68c2008-07-21 17:05:25 +10007836 atomic_inc(&mddev->openers);
NeilBrownc8c00a62009-08-10 12:50:52 +10007837 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007838
Christoph Hellwig818077d2020-09-08 16:53:43 +02007839 bdev_check_media_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007840 out:
NeilBrowne2342ca2016-12-05 16:40:50 +11007841 if (err)
7842 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007843 return err;
7844}
7845
Al Virodb2a1442013-05-05 21:52:57 -04007846static void md_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007847{
NeilBrownf72ffdd2014-09-30 14:23:59 +10007848 struct mddev *mddev = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007849
Eric Sesterhenn52e5f9d2006-10-03 23:33:23 +02007850 BUG_ON(!mddev);
NeilBrownf2ea68c2008-07-21 17:05:25 +10007851 atomic_dec(&mddev->openers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007852 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007853}
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007854
Christoph Hellwiga564e232020-07-08 14:25:41 +02007855static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007856{
NeilBrownfd01b882011-10-11 16:47:53 +11007857 struct mddev *mddev = disk->private_data;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007858 unsigned int ret = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007859
Christoph Hellwiga564e232020-07-08 14:25:41 +02007860 if (mddev->changed)
7861 ret = DISK_EVENT_MEDIA_CHANGE;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007862 mddev->changed = 0;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007863 return ret;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007864}
Christoph Hellwiga564e232020-07-08 14:25:41 +02007865
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007866const struct block_device_operations md_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07007867{
7868 .owner = THIS_MODULE,
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02007869 .submit_bio = md_submit_bio,
Al Viroa39907f2008-03-02 10:31:15 -05007870 .open = md_open,
7871 .release = md_release,
NeilBrownb492b852009-05-26 12:57:36 +10007872 .ioctl = md_ioctl,
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007873#ifdef CONFIG_COMPAT
7874 .compat_ioctl = md_compat_ioctl,
7875#endif
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007876 .getgeo = md_getgeo,
Christoph Hellwiga564e232020-07-08 14:25:41 +02007877 .check_events = md_check_events,
Christoph Hellwig118cf082020-11-03 11:00:13 +01007878 .set_read_only = md_set_read_only,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007879};
7880
NeilBrownf72ffdd2014-09-30 14:23:59 +10007881static int md_thread(void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007882{
NeilBrown2b8bf342011-10-11 16:48:23 +11007883 struct md_thread *thread = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007884
Linus Torvalds1da177e2005-04-16 15:20:36 -07007885 /*
 7886	 * md_thread is a 'system-thread', its priority should be very
7887 * high. We avoid resource deadlocks individually in each
7888 * raid personality. (RAID5 does preallocation) We also use RR and
7889 * the very same RT priority as kswapd, thus we will never get
7890 * into a priority inversion deadlock.
7891 *
7892 * we definitely have to have equal or higher priority than
7893 * bdflush, otherwise bdflush will deadlock if there are too
7894 * many dirty RAID5 blocks.
7895 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007896
NeilBrown6985c432005-10-19 21:23:47 -07007897 allow_signal(SIGKILL);
NeilBrowna6fb0932005-09-09 16:23:56 -07007898 while (!kthread_should_stop()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007899
NeilBrown93588e22005-11-15 00:09:12 -08007900 /* We need to wait INTERRUPTIBLE so that
7901 * we don't add to the load-average.
7902 * That means we need to be sure no signals are
7903 * pending
7904 */
7905 if (signal_pending(current))
7906 flush_signals(current);
7907
7908 wait_event_interruptible_timeout
7909 (thread->wqueue,
7910 test_bit(THREAD_WAKEUP, &thread->flags)
Shaohua Lice1ccd02016-11-21 10:29:18 -08007911 || kthread_should_stop() || kthread_should_park(),
NeilBrown93588e22005-11-15 00:09:12 -08007912 thread->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007913
NeilBrown6c987912011-01-14 09:13:53 +11007914 clear_bit(THREAD_WAKEUP, &thread->flags);
Shaohua Lice1ccd02016-11-21 10:29:18 -08007915 if (kthread_should_park())
7916 kthread_parkme();
NeilBrown6c987912011-01-14 09:13:53 +11007917 if (!kthread_should_stop())
Shaohua Li4ed87312012-10-11 13:34:00 +11007918 thread->run(thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007919 }
NeilBrowna6fb0932005-09-09 16:23:56 -07007920
Linus Torvalds1da177e2005-04-16 15:20:36 -07007921 return 0;
7922}
7923
NeilBrown2b8bf342011-10-11 16:48:23 +11007924void md_wakeup_thread(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007925{
7926 if (thread) {
NeilBrown36a4e1f2011-10-07 14:23:17 +11007927 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
Guoqing Jiangd1d90142017-10-09 10:32:48 +08007928 set_bit(THREAD_WAKEUP, &thread->flags);
7929 wake_up(&thread->wqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007930 }
7931}
NeilBrown6c144d32014-09-30 16:15:38 +10007932EXPORT_SYMBOL(md_wakeup_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007933
Shaohua Li4ed87312012-10-11 13:34:00 +11007934struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7935 struct mddev *mddev, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007936{
NeilBrown2b8bf342011-10-11 16:48:23 +11007937 struct md_thread *thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938
NeilBrown2b8bf342011-10-11 16:48:23 +11007939 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007940 if (!thread)
7941 return NULL;
7942
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943 init_waitqueue_head(&thread->wqueue);
7944
Linus Torvalds1da177e2005-04-16 15:20:36 -07007945 thread->run = run;
7946 thread->mddev = mddev;
NeilBrown32a76272005-06-21 17:17:14 -07007947 thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown0da3c612009-09-23 18:09:45 +10007948 thread->tsk = kthread_run(md_thread, thread,
7949 "%s_%s",
7950 mdname(thread->mddev),
NeilBrown02326052012-07-03 15:56:52 +10007951 name);
NeilBrowna6fb0932005-09-09 16:23:56 -07007952 if (IS_ERR(thread->tsk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953 kfree(thread);
7954 return NULL;
7955 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007956 return thread;
7957}
NeilBrown6c144d32014-09-30 16:15:38 +10007958EXPORT_SYMBOL(md_register_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007959
NeilBrown2b8bf342011-10-11 16:48:23 +11007960void md_unregister_thread(struct md_thread **threadp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007961{
NeilBrown2b8bf342011-10-11 16:48:23 +11007962 struct md_thread *thread = *threadp;
NeilBrowne0cf8f02009-03-31 14:39:39 +11007963 if (!thread)
7964 return;
NeilBrown36a4e1f2011-10-07 14:23:17 +11007965 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
NeilBrown01f96c02011-09-21 15:30:20 +10007966 /* Locking ensures that mddev_unlock does not wake_up a
7967 * non-existent thread
7968 */
7969 spin_lock(&pers_lock);
7970 *threadp = NULL;
7971 spin_unlock(&pers_lock);
NeilBrowna6fb0932005-09-09 16:23:56 -07007972
7973 kthread_stop(thread->tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007974 kfree(thread);
7975}
NeilBrown6c144d32014-09-30 16:15:38 +10007976EXPORT_SYMBOL(md_unregister_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007977
NeilBrownfd01b882011-10-11 16:47:53 +11007978void md_error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979{
NeilBrownb2d444d2005-11-08 21:39:31 -08007980 if (!rdev || test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007981 return;
Dan Williams6bfe0b42008-04-30 00:52:32 -07007982
NeilBrownde393cd2011-07-28 11:31:48 +10007983 if (!mddev->pers || !mddev->pers->error_handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007984 return;
7985 mddev->pers->error_handler(mddev,rdev);
Neil Brown72a23c22008-06-28 08:31:41 +10007986 if (mddev->degraded)
7987 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown00bcb4a2010-06-01 19:37:23 +10007988 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007989 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7990 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7991 md_wakeup_thread(mddev->thread);
NeilBrown768a4182010-07-26 11:49:55 +10007992 if (mddev->event_work.func)
Tejun Heoe804ac72010-10-15 15:36:08 +02007993 queue_work(md_misc_wq, &mddev->event_work);
Guoqing Jiangbb9ef712015-12-28 10:46:38 +08007994 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007995}
NeilBrown6c144d32014-09-30 16:15:38 +10007996EXPORT_SYMBOL(md_error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007997
7998/* seq_file implementation /proc/mdstat */
7999
8000static void status_unused(struct seq_file *seq)
8001{
8002 int i = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008003 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008004
8005 seq_printf(seq, "unused devices: ");
8006
Cheng Renquan159ec1f2009-01-09 08:31:08 +11008007 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008008 char b[BDEVNAME_SIZE];
8009 i++;
8010 seq_printf(seq, "%s ",
8011 bdevname(rdev->bdev,b));
8012 }
8013 if (!i)
8014 seq_printf(seq, "<none>");
8015
8016 seq_printf(seq, "\n");
8017}
8018
NeilBrownf7851be2015-07-02 17:12:58 +10008019static int status_resync(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020{
NeilBrowndd71cf62009-05-07 12:49:35 +10008021 sector_t max_sectors, resync, res;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008022 unsigned long dt, db = 0;
8023 sector_t rt, curr_mark_cnt, resync_mark_cnt;
8024 int scale, recovery_active;
NeilBrown4588b422006-03-27 01:18:04 -08008025 unsigned int per_milli;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008026
NeilBrownc804cde2012-05-21 09:28:33 +10008027 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8028 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
NeilBrowndd71cf62009-05-07 12:49:35 +10008029 max_sectors = mddev->resync_max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008030 else
NeilBrowndd71cf62009-05-07 12:49:35 +10008031 max_sectors = mddev->dev_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008032
NeilBrownf7851be2015-07-02 17:12:58 +10008033 resync = mddev->curr_resync;
8034 if (resync <= 3) {
8035 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8036 /* Still cleaning up */
8037 resync = max_sectors;
Nate Daileyd2e2ec82017-11-30 11:33:30 -05008038 } else if (resync > max_sectors)
8039 resync = max_sectors;
8040 else
NeilBrownf7851be2015-07-02 17:12:58 +10008041 resync -= atomic_read(&mddev->recovery_active);
8042
8043 if (resync == 0) {
Guoqing Jiang0357ba22018-07-02 16:26:25 +08008044 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8045 struct md_rdev *rdev;
8046
8047 rdev_for_each(rdev, mddev)
8048 if (rdev->raid_disk >= 0 &&
8049 !test_bit(Faulty, &rdev->flags) &&
8050 rdev->recovery_offset != MaxSector &&
8051 rdev->recovery_offset) {
8052 seq_printf(seq, "\trecover=REMOTE");
8053 return 1;
8054 }
8055 if (mddev->reshape_position != MaxSector)
8056 seq_printf(seq, "\treshape=REMOTE");
8057 else
8058 seq_printf(seq, "\tresync=REMOTE");
8059 return 1;
8060 }
NeilBrownf7851be2015-07-02 17:12:58 +10008061 if (mddev->recovery_cp < MaxSector) {
8062 seq_printf(seq, "\tresync=PENDING");
8063 return 1;
8064 }
8065 return 0;
8066 }
8067 if (resync < 3) {
8068 seq_printf(seq, "\tresync=DELAYED");
8069 return 1;
8070 }
8071
NeilBrown403df472014-09-30 15:52:29 +10008072 WARN_ON(max_sectors == 0);
NeilBrown4588b422006-03-27 01:18:04 -08008073 /* Pick 'scale' such that (resync>>scale)*1000 will fit
NeilBrowndd71cf62009-05-07 12:49:35 +10008074 * in a sector_t, and (max_sectors>>scale) will fit in a
NeilBrown4588b422006-03-27 01:18:04 -08008075 * u32, as those are the requirements for sector_div.
8076 * Thus 'scale' must be at least 10
8077 */
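	/*
	 * (1000 < 2^10, so once 'resync' has been shifted right by at
	 * least 10 bits, multiplying by 1000 cannot exceed the original
	 * value and the result still fits in a sector_t.)
	 */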
8078 scale = 10;
8079 if (sizeof(sector_t) > sizeof(unsigned long)) {
NeilBrowndd71cf62009-05-07 12:49:35 +10008080 while ( max_sectors/2 > (1ULL<<(scale+32)))
NeilBrown4588b422006-03-27 01:18:04 -08008081 scale++;
8082 }
8083 res = (resync>>scale)*1000;
NeilBrowndd71cf62009-05-07 12:49:35 +10008084 sector_div(res, (u32)((max_sectors>>scale)+1));
NeilBrown4588b422006-03-27 01:18:04 -08008085
8086 per_milli = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008087 {
NeilBrown4588b422006-03-27 01:18:04 -08008088 int i, x = per_milli/50, y = 20-x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008089 seq_printf(seq, "[");
8090 for (i = 0; i < x; i++)
8091 seq_printf(seq, "=");
8092 seq_printf(seq, ">");
8093 for (i = 0; i < y; i++)
8094 seq_printf(seq, ".");
8095 seq_printf(seq, "] ");
8096 }
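	/* e.g. (illustration only) per_milli == 457 prints "[=========>...........] " */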
NeilBrown4588b422006-03-27 01:18:04 -08008097 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
NeilBrownccfcc3c2006-03-27 01:18:09 -08008098 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8099 "reshape" :
NeilBrown61df9d92006-10-03 01:15:57 -07008100 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8101 "check" :
8102 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8103 "resync" : "recovery"))),
8104 per_milli/10, per_milli % 10,
NeilBrowndd71cf62009-05-07 12:49:35 +10008105 (unsigned long long) resync/2,
8106 (unsigned long long) max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008107
8108 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07008109 * dt: time from mark until now
8110 * db: blocks written from mark until now
8111 * rt: remaining time
NeilBrowndd71cf62009-05-07 12:49:35 +10008112 *
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008113 * rt is a sector_t, which is always 64bit now. We are keeping
8114 * the original algorithm, but it is not really necessary.
8115 *
8116 * Original algorithm:
8117 * So we divide before multiply in case it is 32bit and close
8118 * to the limit.
8119 * We scale the divisor (db) by 32 to avoid losing precision
8120 * near the end of resync when the number of remaining sectors
8121 * is close to 'db'.
8122 * We then divide rt by 32 after multiplying by db to compensate.
8123 * The '+1' avoids division by zero if db is very small.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008124 */
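	/*
	 * Rough worked example (figures invented for illustration): with
	 * dt = 10 seconds, db = 20480 sectors written in that window and
	 * 2048000 sectors still to go, the code below yields
	 * rt = ((2048000 / 641) * 10) >> 5 ~= 998 seconds, shown as
	 * " finish=16.6min".
	 */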
8125 dt = ((jiffies - mddev->resync_mark) / HZ);
8126 if (!dt) dt++;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008127
8128 curr_mark_cnt = mddev->curr_mark_cnt;
8129 recovery_active = atomic_read(&mddev->recovery_active);
8130 resync_mark_cnt = mddev->resync_mark_cnt;
8131
8132 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8133 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008134
NeilBrowndd71cf62009-05-07 12:49:35 +10008135 rt = max_sectors - resync; /* number of remaining sectors */
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008136 rt = div64_u64(rt, db/32+1);
NeilBrowndd71cf62009-05-07 12:49:35 +10008137 rt *= dt;
8138 rt >>= 5;
8139
8140 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8141 ((unsigned long)rt % 60)/6);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008142
NeilBrownff4e8d92006-07-10 04:44:16 -07008143 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
NeilBrownf7851be2015-07-02 17:12:58 +10008144 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008145}
8146
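/*
 * Iterator for /proc/mdstat.  The cookies (void*)1 and (void*)2 are
 * sentinels for the header ("Personalities : ...") and the trailing
 * "unused devices:" line respectively; md_seq_show() below checks for
 * them explicitly.
 */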
8147static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8148{
8149 struct list_head *tmp;
8150 loff_t l = *pos;
NeilBrownfd01b882011-10-11 16:47:53 +11008151 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008152
8153 if (l >= 0x10000)
8154 return NULL;
8155 if (!l--)
8156 /* header */
8157 return (void*)1;
8158
8159 spin_lock(&all_mddevs_lock);
8160 list_for_each(tmp,&all_mddevs)
8161 if (!l--) {
NeilBrownfd01b882011-10-11 16:47:53 +11008162 mddev = list_entry(tmp, struct mddev, all_mddevs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008163 mddev_get(mddev);
8164 spin_unlock(&all_mddevs_lock);
8165 return mddev;
8166 }
8167 spin_unlock(&all_mddevs_lock);
8168 if (!l--)
8169 return (void*)2;/* tail */
8170 return NULL;
8171}
8172
8173static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8174{
8175 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11008176 struct mddev *next_mddev, *mddev = v;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008177
Linus Torvalds1da177e2005-04-16 15:20:36 -07008178 ++*pos;
8179 if (v == (void*)2)
8180 return NULL;
8181
8182 spin_lock(&all_mddevs_lock);
8183 if (v == (void*)1)
8184 tmp = all_mddevs.next;
8185 else
8186 tmp = mddev->all_mddevs.next;
8187 if (tmp != &all_mddevs)
NeilBrownfd01b882011-10-11 16:47:53 +11008188 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008189 else {
8190 next_mddev = (void*)2;
8191 *pos = 0x10000;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008192 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008193 spin_unlock(&all_mddevs_lock);
8194
8195 if (v != (void*)1)
8196 mddev_put(mddev);
8197 return next_mddev;
8198
8199}
8200
8201static void md_seq_stop(struct seq_file *seq, void *v)
8202{
NeilBrownfd01b882011-10-11 16:47:53 +11008203 struct mddev *mddev = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008204
8205 if (mddev && v != (void*)1 && v != (void*)2)
8206 mddev_put(mddev);
8207}
8208
8209static int md_seq_show(struct seq_file *seq, void *v)
8210{
NeilBrownfd01b882011-10-11 16:47:53 +11008211 struct mddev *mddev = v;
Andre Nolldd8ac332009-03-31 14:33:13 +11008212 sector_t sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11008213 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008214
8215 if (v == (void*)1) {
NeilBrown84fc4b52011-10-11 16:49:58 +11008216 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008217 seq_printf(seq, "Personalities : ");
8218 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008219 list_for_each_entry(pers, &pers_list, list)
8220 seq_printf(seq, "[%s] ", pers->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008221
8222 spin_unlock(&pers_lock);
8223 seq_printf(seq, "\n");
Kay Sieversf1514632011-07-12 20:48:39 +02008224 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008225 return 0;
8226 }
8227 if (v == (void*)2) {
8228 status_unused(seq);
8229 return 0;
8230 }
8231
NeilBrown36d091f2014-12-15 12:56:58 +11008232 spin_lock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008233 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8234 seq_printf(seq, "%s : %sactive", mdname(mddev),
8235 mddev->pers ? "" : "in");
8236 if (mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08008237 if (mddev->ro==1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008238 seq_printf(seq, " (read-only)");
NeilBrownf91de922005-11-08 21:39:36 -08008239 if (mddev->ro==2)
NeilBrown52720ae2008-03-10 11:43:47 -07008240 seq_printf(seq, " (auto-read-only)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008241 seq_printf(seq, " %s", mddev->pers->name);
8242 }
8243
Andre Nolldd8ac332009-03-31 14:33:13 +11008244 sectors = 0;
NeilBrownf97fcad2014-12-15 12:56:59 +11008245 rcu_read_lock();
8246 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008247 char b[BDEVNAME_SIZE];
8248 seq_printf(seq, " %s[%d]",
8249 bdevname(rdev->bdev,b), rdev->desc_nr);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07008250 if (test_bit(WriteMostly, &rdev->flags))
8251 seq_printf(seq, "(W)");
Shaohua Li9efdca12015-10-12 16:59:50 -07008252 if (test_bit(Journal, &rdev->flags))
8253 seq_printf(seq, "(J)");
NeilBrownb2d444d2005-11-08 21:39:31 -08008254 if (test_bit(Faulty, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008255 seq_printf(seq, "(F)");
8256 continue;
NeilBrown2d78f8c2011-12-23 10:17:51 +11008257 }
8258 if (rdev->raid_disk < 0)
NeilBrownb325a322005-09-09 16:24:00 -07008259 seq_printf(seq, "(S)"); /* spare */
NeilBrown2d78f8c2011-12-23 10:17:51 +11008260 if (test_bit(Replacement, &rdev->flags))
8261 seq_printf(seq, "(R)");
Andre Nolldd8ac332009-03-31 14:33:13 +11008262 sectors += rdev->sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008263 }
NeilBrownf97fcad2014-12-15 12:56:59 +11008264 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008265
8266 if (!list_empty(&mddev->disks)) {
8267 if (mddev->pers)
8268 seq_printf(seq, "\n %llu blocks",
Andre Nollf233ea52008-07-21 17:05:22 +10008269 (unsigned long long)
8270 mddev->array_sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271 else
8272 seq_printf(seq, "\n %llu blocks",
Andre Nolldd8ac332009-03-31 14:33:13 +11008273 (unsigned long long)sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008274 }
NeilBrown1cd6bf12005-09-09 16:24:00 -07008275 if (mddev->persistent) {
8276 if (mddev->major_version != 0 ||
8277 mddev->minor_version != 90) {
8278 seq_printf(seq," super %d.%d",
8279 mddev->major_version,
8280 mddev->minor_version);
8281 }
NeilBrowne6910632008-02-06 01:39:51 -08008282 } else if (mddev->external)
8283 seq_printf(seq, " super external:%s",
8284 mddev->metadata_type);
8285 else
NeilBrown1cd6bf12005-09-09 16:24:00 -07008286 seq_printf(seq, " super non-persistent");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008287
8288 if (mddev->pers) {
NeilBrownd710e132008-10-13 11:55:12 +11008289 mddev->pers->status(seq, mddev);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008290 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008291 if (mddev->pers->sync_request) {
NeilBrownf7851be2015-07-02 17:12:58 +10008292 if (status_resync(seq, mddev))
NeilBrown8e1b39d2005-11-08 21:39:41 -08008293 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008294 }
NeilBrown32a76272005-06-21 17:17:14 -07008295 } else
8296 seq_printf(seq, "\n ");
8297
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07008298 md_bitmap_status(seq, mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008299
8300 seq_printf(seq, "\n");
8301 }
NeilBrown36d091f2014-12-15 12:56:58 +11008302 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008303
Linus Torvalds1da177e2005-04-16 15:20:36 -07008304 return 0;
8305}
8306
Jan Engelhardt110518b2009-05-07 12:49:37 +10008307static const struct seq_operations md_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008308 .start = md_seq_start,
8309 .next = md_seq_next,
8310 .stop = md_seq_stop,
8311 .show = md_seq_show,
8312};
8313
8314static int md_seq_open(struct inode *inode, struct file *file)
8315{
Kay Sieversf1514632011-07-12 20:48:39 +02008316 struct seq_file *seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008317 int error;
8318
8319 error = seq_open(file, &md_seq_ops);
NeilBrownd7603b72006-01-06 00:20:30 -08008320 if (error)
Kay Sieversf1514632011-07-12 20:48:39 +02008321 return error;
8322
8323 seq = file->private_data;
8324 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008325 return error;
8326}
8327
NeilBrowne2f23b62014-04-09 14:33:51 +10008328static int md_unloading;
Al Viroafc9a422017-07-03 06:39:46 -04008329static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
NeilBrownd7603b72006-01-06 00:20:30 -08008330{
Kay Sieversf1514632011-07-12 20:48:39 +02008331 struct seq_file *seq = filp->private_data;
Al Viroafc9a422017-07-03 06:39:46 -04008332 __poll_t mask;
NeilBrownd7603b72006-01-06 00:20:30 -08008333
NeilBrowne2f23b62014-04-09 14:33:51 +10008334 if (md_unloading)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008335 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008336 poll_wait(filp, &md_event_waiters, wait);
8337
8338 /* always allow read */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008339 mask = EPOLLIN | EPOLLRDNORM;
NeilBrownd7603b72006-01-06 00:20:30 -08008340
Kay Sieversf1514632011-07-12 20:48:39 +02008341 if (seq->poll_event != atomic_read(&md_event_count))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008342 mask |= EPOLLERR | EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008343 return mask;
8344}
8345
Alexey Dobriyan97a32532020-02-03 17:37:17 -08008346static const struct proc_ops mdstat_proc_ops = {
8347 .proc_open = md_seq_open,
8348 .proc_read = seq_read,
8349 .proc_lseek = seq_lseek,
8350 .proc_release = seq_release,
8351 .proc_poll = mdstat_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008352};
8353
NeilBrown84fc4b52011-10-11 16:49:58 +11008354int register_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008355{
NeilBrown9d487392016-11-02 14:16:49 +11008356 pr_debug("md: %s personality registered for level %d\n",
8357 p->name, p->level);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008358 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008359 list_add_tail(&p->list, &pers_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008360 spin_unlock(&pers_lock);
8361 return 0;
8362}
NeilBrown6c144d32014-09-30 16:15:38 +10008363EXPORT_SYMBOL(register_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008364
NeilBrown84fc4b52011-10-11 16:49:58 +11008365int unregister_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008366{
NeilBrown9d487392016-11-02 14:16:49 +11008367 pr_debug("md: %s personality unregistered\n", p->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008368 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008369 list_del_init(&p->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008370 spin_unlock(&pers_lock);
8371 return 0;
8372}
NeilBrown6c144d32014-09-30 16:15:38 +10008373EXPORT_SYMBOL(unregister_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008374
NeilBrown6022e752015-08-13 12:32:55 +10008375int register_md_cluster_operations(struct md_cluster_operations *ops,
8376 struct module *module)
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008377{
NeilBrown6022e752015-08-13 12:32:55 +10008378 int ret = 0;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008379 spin_lock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008380 if (md_cluster_ops != NULL)
8381 ret = -EALREADY;
8382 else {
8383 md_cluster_ops = ops;
8384 md_cluster_mod = module;
8385 }
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008386 spin_unlock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008387 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008388}
8389EXPORT_SYMBOL(register_md_cluster_operations);
8390
8391int unregister_md_cluster_operations(void)
8392{
8393 spin_lock(&pers_lock);
8394 md_cluster_ops = NULL;
8395 spin_unlock(&pers_lock);
8396 return 0;
8397}
8398EXPORT_SYMBOL(unregister_md_cluster_operations);
8399
8400int md_setup_cluster(struct mddev *mddev, int nodes)
8401{
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008402 int ret;
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008403 if (!md_cluster_ops)
8404 request_module("md-cluster");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008405 spin_lock(&pers_lock);
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008406 /* ensure module won't be unloaded */
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008407 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
NeilBrown9d487392016-11-02 14:16:49 +11008408		pr_warn("can't find md-cluster module or get its reference.\n");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008409 spin_unlock(&pers_lock);
8410 return -ENOENT;
8411 }
8412 spin_unlock(&pers_lock);
8413
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008414 ret = md_cluster_ops->join(mddev, nodes);
8415 if (!ret)
8416 mddev->safemode_delay = 0;
8417 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008418}
8419
8420void md_cluster_stop(struct mddev *mddev)
8421{
Goldwyn Rodriguesc4ce8672014-03-29 10:20:02 -05008422 if (!md_cluster_ops)
8423 return;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008424 md_cluster_ops->leave(mddev);
8425 module_put(md_cluster_mod);
8426}
8427
NeilBrownfd01b882011-10-11 16:47:53 +11008428static int is_mddev_idle(struct mddev *mddev, int init)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008429{
NeilBrownf72ffdd2014-09-30 14:23:59 +10008430 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008431 int idle;
NeilBrowneea1bf32009-03-31 14:27:02 +11008432 int curr_events;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008433
8434 idle = 1;
NeilBrown4b809912008-07-21 17:05:25 +10008435 rcu_read_lock();
8436 rdev_for_each_rcu(rdev, mddev) {
Christoph Hellwig4245e522020-09-03 07:40:59 +02008437 struct gendisk *disk = rdev->bdev->bd_disk;
Christoph Hellwig8446fe92020-11-24 09:36:54 +01008438 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
NeilBrowneea1bf32009-03-31 14:27:02 +11008439 atomic_read(&disk->sync_io);
NeilBrown713f6ab2007-07-17 04:06:12 -07008440 /* sync IO will cause sync_io to increase before the disk_stats
8441 * as sync_io is counted when a request starts, and
8442 * disk_stats is counted when it completes.
8443 * So resync activity will cause curr_events to be smaller than
8444 * when there was no such activity.
8445		 * non-sync IO will cause disk_stats to increase without
8446 * increasing sync_io so curr_events will (eventually)
8447 * be larger than it was before. Once it becomes
8448 * substantially larger, the test below will cause
8449 * the array to appear non-idle, and resync will slow
8450 * down.
8451 * If there is a lot of outstanding resync activity when
8452		 * we set last_events to curr_events, then all that activity
8453 * completing might cause the array to appear non-idle
8454 * and resync will be slowed down even though there might
8455 * not have been non-resync activity. This will only
8456 * happen once though. 'last_events' will soon reflect
8457		 * the state where there are few or no outstanding
8458 * resync requests, and further resync activity will
8459 * always make curr_events less than last_events.
NeilBrownc0e48522005-11-18 01:11:01 -08008460 *
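		 * Illustrative example (made-up numbers): if 256 sectors of
		 * ordinary, non-resync reads have completed since 'last_events'
		 * was recorded, curr_events rises 256 above last_events, which
		 * exceeds the 64-sector slack below, so the array is treated as
		 * busy and the resync throttles.  Pure resync traffic, by
		 * contrast, only holds curr_events at or below its previous
		 * level (outstanding sync requests have bumped sync_io but not
		 * yet the part stats), so it never trips the threshold on its
		 * own.
		 *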
Linus Torvalds1da177e2005-04-16 15:20:36 -07008461 */
NeilBrowneea1bf32009-03-31 14:27:02 +11008462 if (init || curr_events - rdev->last_events > 64) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008463 rdev->last_events = curr_events;
8464 idle = 0;
8465 }
8466 }
NeilBrown4b809912008-07-21 17:05:25 +10008467 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008468 return idle;
8469}
8470
NeilBrownfd01b882011-10-11 16:47:53 +11008471void md_done_sync(struct mddev *mddev, int blocks, int ok)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008472{
8473	/* another "blocks" (512-byte) blocks have been synced */
8474 atomic_sub(blocks, &mddev->recovery_active);
8475 wake_up(&mddev->recovery_wait);
8476 if (!ok) {
NeilBrowndfc70642008-05-23 13:04:39 -07008477 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
majianpeng0a19caa2012-11-19 19:57:34 +08008478 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008479 md_wakeup_thread(mddev->thread);
8480 // stop recovery, signal do_sync ....
8481 }
8482}
NeilBrown6c144d32014-09-30 16:15:38 +10008483EXPORT_SYMBOL(md_done_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484
NeilBrown06d91a52005-06-21 17:17:12 -07008485/* md_write_start(mddev, bi)
8486 * If we need to update some array metadata (e.g. 'active' flag
NeilBrown3d310eb2005-06-21 17:17:26 -07008487 * in superblock) before writing, schedule a superblock update
8488 * and wait for it to complete.
NeilBrowncc27b0c2017-06-05 16:49:39 +10008489 * A return value of 'false' means that the write wasn't recorded
8490 * and cannot proceed, as the array is being suspended.
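 *
 * Illustrative write-path pattern (a sketch of how a personality is
 * expected to pair the calls, not a verbatim call site):
 *
 *	if (!md_write_start(mddev, bio))
 *		return false;	- array is suspending, caller retries later
 *	...queue or submit the write...
 *	and call md_write_end(mddev) once the request has completed.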
NeilBrown06d91a52005-06-21 17:17:12 -07008491 */
NeilBrowncc27b0c2017-06-05 16:49:39 +10008492bool md_write_start(struct mddev *mddev, struct bio *bi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008493{
Neil Brown0fd62b82008-06-28 08:31:36 +10008494 int did_change = 0;
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008495
NeilBrown06d91a52005-06-21 17:17:12 -07008496 if (bio_data_dir(bi) != WRITE)
NeilBrowncc27b0c2017-06-05 16:49:39 +10008497 return true;
NeilBrown06d91a52005-06-21 17:17:12 -07008498
NeilBrownf91de922005-11-08 21:39:36 -08008499 BUG_ON(mddev->ro == 1);
8500 if (mddev->ro == 2) {
8501 /* need to switch to read/write */
8502 mddev->ro = 0;
8503 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8504 md_wakeup_thread(mddev->thread);
NeilBrown25156192008-03-04 14:29:32 -08008505 md_wakeup_thread(mddev->sync_thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008506 did_change = 1;
NeilBrownf91de922005-11-08 21:39:36 -08008507 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008508 rcu_read_lock();
8509 percpu_ref_get(&mddev->writes_pending);
NeilBrown55cc39f2017-03-15 14:05:14 +11008510 smp_mb(); /* Match smp_mb in set_in_sync() */
NeilBrown31a59e32008-04-30 00:52:30 -07008511 if (mddev->safemode == 1)
8512 mddev->safemode = 0;
NeilBrown4ad23a972017-03-15 14:05:14 +11008513 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
NeilBrown81fe48e2017-08-08 16:56:36 +10008514 if (mddev->in_sync || mddev->sync_checkers) {
NeilBrown85572d72014-12-15 12:56:56 +11008515 spin_lock(&mddev->lock);
NeilBrown3d310eb2005-06-21 17:17:26 -07008516 if (mddev->in_sync) {
8517 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008518 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8519 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown3d310eb2005-06-21 17:17:26 -07008520 md_wakeup_thread(mddev->thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008521 did_change = 1;
NeilBrown3d310eb2005-06-21 17:17:26 -07008522 }
NeilBrown85572d72014-12-15 12:56:56 +11008523 spin_unlock(&mddev->lock);
NeilBrown06d91a52005-06-21 17:17:12 -07008524 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008525 rcu_read_unlock();
Neil Brown0fd62b82008-06-28 08:31:36 +10008526 if (did_change)
NeilBrown00bcb4a2010-06-01 19:37:23 +10008527 sysfs_notify_dirent_safe(mddev->sysfs_state);
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008528 if (!mddev->has_superblocks)
8529 return true;
NeilBrown09a44cc2008-05-23 13:04:36 -07008530 wait_event(mddev->sb_wait,
NeilBrownd47c8ad2017-10-05 16:23:16 +11008531 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8532 mddev->suspended);
NeilBrowncc27b0c2017-06-05 16:49:39 +10008533 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8534 percpu_ref_put(&mddev->writes_pending);
8535 return false;
8536 }
8537 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008538}
NeilBrown6c144d32014-09-30 16:15:38 +10008539EXPORT_SYMBOL(md_write_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008540
NeilBrown49728052017-03-15 14:05:12 +11008541/* md_write_inc can only be called when md_write_start() has
8542 * already been called at least once for the current request.
8543 * It increments the counter and is useful when a single request
8544 * is split into several parts. Each part causes an increment and
8545 * so needs a matching md_write_end().
8546 * Unlike md_write_start(), it is safe to call md_write_inc() inside
8547 * a spinlocked region.
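 *
 * Illustrative pairing (sketch only, not a verbatim caller):
 *	md_write_start(mddev, bio)	- once, before the first part
 *	md_write_inc(mddev, split)	- once for each extra part a split creates
 *	md_write_end(mddev)		- once per part, as each part completes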
8548 */
8549void md_write_inc(struct mddev *mddev, struct bio *bi)
8550{
8551 if (bio_data_dir(bi) != WRITE)
8552 return;
8553 WARN_ON_ONCE(mddev->in_sync || mddev->ro);
NeilBrown4ad23a972017-03-15 14:05:14 +11008554 percpu_ref_get(&mddev->writes_pending);
NeilBrown49728052017-03-15 14:05:12 +11008555}
8556EXPORT_SYMBOL(md_write_inc);
8557
NeilBrownfd01b882011-10-11 16:47:53 +11008558void md_write_end(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008559{
NeilBrown4ad23a972017-03-15 14:05:14 +11008560 percpu_ref_put(&mddev->writes_pending);
8561
8562 if (mddev->safemode == 2)
8563 md_wakeup_thread(mddev->thread);
8564 else if (mddev->safemode_delay)
8565 /* The roundup() ensures this only performs locking once
8566 * every ->safemode_delay jiffies
8567 */
8568 mod_timer(&mddev->safemode_timer,
8569 roundup(jiffies, mddev->safemode_delay) +
8570 mddev->safemode_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008571}
NeilBrown4ad23a972017-03-15 14:05:14 +11008572
NeilBrown6c144d32014-09-30 16:15:38 +10008573EXPORT_SYMBOL(md_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008574
NeilBrown2a2275d2007-01-26 00:57:11 -08008575/* md_allow_write(mddev)
8576 * Calling this ensures that the array is marked 'active' so that writes
8577 * may proceed without blocking. It is important to call this before
8578 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8579 * Must be called with mddev_lock held.
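 *
 * Illustrative use (a sketch of the intended pattern, not a verbatim
 * caller; error handling elided):
 *
 *	mddev_lock(mddev);
 *	md_allow_write(mddev);
 *	buf = kzalloc(size, GFP_KERNEL);
 *		- reclaim may now write dirty data to this array without
 *		  waiting on a superblock update that we could only issue
 *		  while still holding the lock
 *	...
 *	mddev_unlock(mddev);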
8580 */
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008581void md_allow_write(struct mddev *mddev)
NeilBrown2a2275d2007-01-26 00:57:11 -08008582{
8583 if (!mddev->pers)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008584 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008585 if (mddev->ro)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008586 return;
Neil Brown1a0fd492008-06-28 08:31:27 +10008587 if (!mddev->pers->sync_request)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008588 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008589
NeilBrown85572d72014-12-15 12:56:56 +11008590 spin_lock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008591 if (mddev->in_sync) {
8592 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008593 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8594 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown2a2275d2007-01-26 00:57:11 -08008595 if (mddev->safemode_delay &&
8596 mddev->safemode == 0)
8597 mddev->safemode = 1;
NeilBrown85572d72014-12-15 12:56:56 +11008598 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008599 md_update_sb(mddev, 0);
NeilBrown00bcb4a2010-06-01 19:37:23 +10008600 sysfs_notify_dirent_safe(mddev->sysfs_state);
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008601 /* wait for the dirty state to be recorded in the metadata */
8602 wait_event(mddev->sb_wait,
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008603 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown2a2275d2007-01-26 00:57:11 -08008604 } else
NeilBrown85572d72014-12-15 12:56:56 +11008605 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008606}
8607EXPORT_SYMBOL_GPL(md_allow_write);
8608
Linus Torvalds1da177e2005-04-16 15:20:36 -07008609#define SYNC_MARKS 10
8610#define SYNC_MARK_STEP (3*HZ)
majianpeng54f89342012-10-31 11:59:10 +11008611#define UPDATE_FREQUENCY (5*60*HZ)
Shaohua Li4ed87312012-10-11 13:34:00 +11008612void md_do_sync(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008613{
Shaohua Li4ed87312012-10-11 13:34:00 +11008614 struct mddev *mddev = thread->mddev;
NeilBrownfd01b882011-10-11 16:47:53 +11008615 struct mddev *mddev2;
Yufen Yue5b521e2019-06-14 15:41:07 -07008616 unsigned int currspeed = 0, window;
Xiao Niac7e50a2014-08-07 09:37:41 -04008617 sector_t max_sectors,j, io_sectors, recovery_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008618 unsigned long mark[SYNC_MARKS];
majianpeng54f89342012-10-31 11:59:10 +11008619 unsigned long update_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008620 sector_t mark_cnt[SYNC_MARKS];
8621 int last_mark,m;
8622 struct list_head *tmp;
8623 sector_t last_check;
NeilBrown57afd892005-06-21 17:17:13 -07008624 int skipped = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008625 struct md_rdev *rdev;
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008626 char *desc, *action = NULL;
majianpeng7c2c57c2012-07-03 12:12:26 +10008627 struct blk_plug plug;
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008628 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008629
8630	/* just in case thread restarts... */
Song Liud5d885f2017-11-19 22:17:01 -08008631 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8632 test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008633 return;
NeilBrown3991b312014-05-28 13:39:23 +10008634 if (mddev->ro) {/* never try to sync a read-only array */
8635 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008636 return;
NeilBrown3991b312014-05-28 13:39:23 +10008637 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008638
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008639 if (mddev_is_clustered(mddev)) {
8640 ret = md_cluster_ops->resync_start(mddev);
8641 if (ret)
8642 goto skip;
8643
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04008644 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008645 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8646 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8647 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8648 && ((unsigned long long)mddev->curr_resync_completed
8649 < (unsigned long long)mddev->resync_max_sectors))
8650 goto skip;
8651 }
8652
NeilBrown61df9d92006-10-03 01:15:57 -07008653 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008654 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008655 desc = "data-check";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008656 action = "check";
8657 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008658 desc = "requested-resync";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008659 action = "repair";
8660 } else
NeilBrown61df9d92006-10-03 01:15:57 -07008661 desc = "resync";
8662 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8663 desc = "reshape";
8664 else
8665 desc = "recovery";
8666
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008667 mddev->last_sync_action = action ?: desc;
8668
Linus Torvalds1da177e2005-04-16 15:20:36 -07008669 /* we overload curr_resync somewhat here.
8670 * 0 == not engaged in resync at all
8671 * 2 == checking that there is no conflict with another sync
8672 * 1 == like 2, but have yielded to allow conflicting resync to
Yufen Yue5b521e2019-06-14 15:41:07 -07008673 * commence
Linus Torvalds1da177e2005-04-16 15:20:36 -07008674 * other == active in resync - this many blocks
8675 *
8676 * Before starting a resync we must have set curr_resync to
8677 * 2, and then checked that every "conflicting" array has curr_resync
8678 * less than ours. When we find one that is the same or higher
8679 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
8680 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
8681 * This will mean we have to start checking from the beginning again.
8682 *
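	 * Illustrative scenario (hypothetical): md0 and md1 share a physical
	 * device and both reach this point, so both set curr_resync to 2.
	 * The mddev with the lower address arbitrarily yields: it drops
	 * curr_resync to 1 and sleeps on resync_wait while the other
	 * array's curr_resync is still >= its own.  When the winner
	 * finishes (or is interrupted) the yielder wakes, loops back and
	 * repeats the whole conflict check.
	 *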
8683 */
8684
8685 do {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008686 int mddev2_minor = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008687 mddev->curr_resync = 2;
8688
8689 try_again:
NeilBrown404e4b42009-12-30 15:25:23 +11008690 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008691 goto skip;
NeilBrown29ac4aa2008-02-06 01:39:58 -08008692 for_each_mddev(mddev2, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008693 if (mddev2 == mddev)
8694 continue;
Bernd Schubert90b08712008-05-23 13:04:38 -07008695 if (!mddev->parallel_resync
8696 && mddev2->curr_resync
8697 && match_mddev_units(mddev, mddev2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008698 DEFINE_WAIT(wq);
8699 if (mddev < mddev2 && mddev->curr_resync == 2) {
8700 /* arbitrarily yield */
8701 mddev->curr_resync = 1;
8702 wake_up(&resync_wait);
8703 }
8704 if (mddev > mddev2 && mddev->curr_resync == 1)
8705 /* no need to wait here, we can wait the next
8706 * time 'round when curr_resync == 2
8707 */
8708 continue;
NeilBrown97441972008-09-19 11:49:54 +10008709 /* We need to wait 'interruptible' so as not to
8710 * contribute to the load average, and not to
8711 * be caught by 'softlockup'
8712 */
8713 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
NeilBrownc91abf52013-11-19 12:02:01 +11008714 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown8712e552005-10-26 01:58:58 -07008715 mddev2->curr_resync >= mddev->curr_resync) {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008716 if (mddev2_minor != mddev2->md_minor) {
8717 mddev2_minor = mddev2->md_minor;
NeilBrown9d487392016-11-02 14:16:49 +11008718 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8719 desc, mdname(mddev),
8720 mdname(mddev2));
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008722 mddev_put(mddev2);
NeilBrown97441972008-09-19 11:49:54 +10008723 if (signal_pending(current))
8724 flush_signals(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008725 schedule();
8726 finish_wait(&resync_wait, &wq);
8727 goto try_again;
8728 }
8729 finish_wait(&resync_wait, &wq);
8730 }
8731 }
8732 } while (mddev->curr_resync < 2);
8733
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008734 j = 0;
NeilBrown9d888832005-11-08 21:39:26 -08008735 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008736 /* resync follows the size requested by the personality,
NeilBrown57afd892005-06-21 17:17:13 -07008737 * which defaults to physical size, but can be virtual size
Linus Torvalds1da177e2005-04-16 15:20:36 -07008738 */
8739 max_sectors = mddev->resync_max_sectors;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11008740 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008741 /* we don't use the checkpoint if there's a bitmap */
Neil Brown5e96ee62008-06-28 08:31:24 +10008742 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8743 j = mddev->resync_min;
8744 else if (!mddev->bitmap)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008745 j = mddev->recovery_cp;
Neil Brown5e96ee62008-06-28 08:31:24 +10008746
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008747 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
NeilBrownc804cde2012-05-21 09:28:33 +10008748 max_sectors = mddev->resync_max_sectors;
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008749 /*
8750 * If the original node aborts reshaping then we continue the
8751	 * reshaping, so set j again to avoid restarting the reshape from
8752	 * the very beginning
8753 */
8754 if (mddev_is_clustered(mddev) &&
8755 mddev->reshape_position != MaxSector)
8756 j = mddev->reshape_position;
8757 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008758 /* recovery follows the physical size of devices */
Andre Noll58c0fed2009-03-31 14:33:13 +11008759 max_sectors = mddev->dev_sectors;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008760 j = MaxSector;
Dan Williams4e59ca72009-12-12 21:17:06 -07008761 rcu_read_lock();
NeilBrowndafb20f2012-03-19 12:46:39 +11008762 rdev_for_each_rcu(rdev, mddev)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008763 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07008764 !test_bit(Journal, &rdev->flags) &&
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008765 !test_bit(Faulty, &rdev->flags) &&
8766 !test_bit(In_sync, &rdev->flags) &&
8767 rdev->recovery_offset < j)
8768 j = rdev->recovery_offset;
Dan Williams4e59ca72009-12-12 21:17:06 -07008769 rcu_read_unlock();
NeilBrown133d4522014-07-02 12:04:14 +10008770
8771 /* If there is a bitmap, we need to make sure all
8772 * writes that started before we added a spare
8773 * complete before we start doing a recovery.
8774 * Otherwise the write might complete and (via
8775 * bitmap_endwrite) set a bit in the bitmap after the
8776 * recovery has checked that bit and skipped that
8777 * region.
8778 */
8779 if (mddev->bitmap) {
8780 mddev->pers->quiesce(mddev, 1);
8781 mddev->pers->quiesce(mddev, 0);
8782 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008783 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008784
NeilBrown9d487392016-11-02 14:16:49 +11008785 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8786 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
8787 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8788 speed_max(mddev), desc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008789
NeilBrowneea1bf32009-03-31 14:27:02 +11008790 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008791
NeilBrown57afd892005-06-21 17:17:13 -07008792 io_sectors = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008793 for (m = 0; m < SYNC_MARKS; m++) {
8794 mark[m] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008795 mark_cnt[m] = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008796 }
8797 last_mark = 0;
8798 mddev->resync_mark = mark[last_mark];
8799 mddev->resync_mark_cnt = mark_cnt[last_mark];
8800
8801 /*
8802 * Tune reconstruction:
8803 */
Yufen Yue5b521e2019-06-14 15:41:07 -07008804 window = 32 * (PAGE_SIZE / 512);
NeilBrown9d487392016-11-02 14:16:49 +11008805 pr_debug("md: using %dk window, over a total of %lluk.\n",
8806 window/2, (unsigned long long)max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008807
8808 atomic_set(&mddev->recovery_active, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008809 last_check = 0;
8810
8811 if (j>2) {
NeilBrown9d487392016-11-02 14:16:49 +11008812 pr_debug("md: resuming %s of %s from checkpoint.\n",
8813 desc, mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008814 mddev->curr_resync = j;
NeilBrown72f36d52012-10-11 14:25:57 +11008815 } else
8816 mddev->curr_resync = 3; /* no longer delayed */
NeilBrown75d3da42011-01-14 09:14:34 +11008817 mddev->curr_resync_completed = j;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008818 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown72f36d52012-10-11 14:25:57 +11008819 md_new_event(mddev);
majianpeng54f89342012-10-31 11:59:10 +11008820 update_time = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008821
majianpeng7c2c57c2012-07-03 12:12:26 +10008822 blk_start_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008823 while (j < max_sectors) {
NeilBrown57afd892005-06-21 17:17:13 -07008824 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008825
NeilBrown57afd892005-06-21 17:17:13 -07008826 skipped = 0;
NeilBrown97e4f422009-03-31 14:33:13 +11008827
NeilBrown7a91ee12009-05-26 12:57:21 +10008828 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8829 ((mddev->curr_resync > mddev->curr_resync_completed &&
8830 (mddev->curr_resync - mddev->curr_resync_completed)
8831 > (max_sectors >> 4)) ||
majianpeng54f89342012-10-31 11:59:10 +11008832 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
NeilBrown7a91ee12009-05-26 12:57:21 +10008833 (j - mddev->curr_resync_completed)*2
NeilBrownc5e19d92015-07-17 12:06:02 +10008834 >= mddev->resync_max - mddev->curr_resync_completed ||
8835 mddev->curr_resync_completed > mddev->resync_max
NeilBrown7a91ee12009-05-26 12:57:21 +10008836 )) {
NeilBrown97e4f422009-03-31 14:33:13 +11008837 /* time to update curr_resync_completed */
NeilBrown97e4f422009-03-31 14:33:13 +11008838 wait_event(mddev->recovery_wait,
8839 atomic_read(&mddev->recovery_active) == 0);
NeilBrown75d3da42011-01-14 09:14:34 +11008840 mddev->curr_resync_completed = j;
kernelmail35d78c62012-10-31 11:59:10 +11008841 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8842 j > mddev->recovery_cp)
8843 mddev->recovery_cp = j;
majianpeng54f89342012-10-31 11:59:10 +11008844 update_time = jiffies;
Shaohua Li29530792016-12-08 15:48:19 -08008845 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
Junxiao Bie1a86db2020-07-14 16:10:26 -07008846 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown97e4f422009-03-31 14:33:13 +11008847 }
NeilBrownacb180b2009-04-14 16:28:34 +10008848
NeilBrownc91abf52013-11-19 12:02:01 +11008849 while (j >= mddev->resync_max &&
8850 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
NeilBrowne62e58a2009-07-01 13:15:35 +10008851 /* As this condition is controlled by user-space,
8852 * we can block indefinitely, so use '_interruptible'
8853 * to avoid triggering warnings.
8854 */
8855 flush_signals(current); /* just in case */
8856 wait_event_interruptible(mddev->recovery_wait,
8857 mddev->resync_max > j
NeilBrownc91abf52013-11-19 12:02:01 +11008858 || test_bit(MD_RECOVERY_INTR,
8859 &mddev->recovery));
NeilBrowne62e58a2009-07-01 13:15:35 +10008860 }
NeilBrownacb180b2009-04-14 16:28:34 +10008861
NeilBrownc91abf52013-11-19 12:02:01 +11008862 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8863 break;
NeilBrownacb180b2009-04-14 16:28:34 +10008864
NeilBrown09314792015-02-19 16:04:40 +11008865 sectors = mddev->pers->sync_request(mddev, j, &skipped);
NeilBrown57afd892005-06-21 17:17:13 -07008866 if (sectors == 0) {
NeilBrowndfc70642008-05-23 13:04:39 -07008867 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownc91abf52013-11-19 12:02:01 +11008868 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008869 }
NeilBrown57afd892005-06-21 17:17:13 -07008870
8871 if (!skipped) { /* actual IO requested */
8872 io_sectors += sectors;
8873 atomic_add(sectors, &mddev->recovery_active);
8874 }
8875
NeilBrowne875ece2011-07-28 11:39:24 +10008876 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8877 break;
8878
Linus Torvalds1da177e2005-04-16 15:20:36 -07008879 j += sectors;
NeilBrown5ed1df22015-07-24 13:27:08 +10008880 if (j > max_sectors)
8881 /* when skipping, extra large numbers can be returned. */
8882 j = max_sectors;
NeilBrown72f36d52012-10-11 14:25:57 +11008883 if (j > 2)
8884 mddev->curr_resync = j;
NeilBrownff4e8d92006-07-10 04:44:16 -07008885 mddev->curr_mark_cnt = io_sectors;
NeilBrownd7603b72006-01-06 00:20:30 -08008886 if (last_check == 0)
NeilBrowne875ece2011-07-28 11:39:24 +10008887 /* this is the earliest that rebuild will be
NeilBrownd7603b72006-01-06 00:20:30 -08008888 * visible in /proc/mdstat
8889 */
8890 md_new_event(mddev);
NeilBrown57afd892005-06-21 17:17:13 -07008891
8892 if (last_check + window > io_sectors || j == max_sectors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008893 continue;
8894
NeilBrown57afd892005-06-21 17:17:13 -07008895 last_check = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008896 repeat:
8897 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8898 /* step marks */
8899 int next = (last_mark+1) % SYNC_MARKS;
8900
8901 mddev->resync_mark = mark[next];
8902 mddev->resync_mark_cnt = mark_cnt[next];
8903 mark[next] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008904 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008905 last_mark = next;
8906 }
8907
NeilBrownc91abf52013-11-19 12:02:01 +11008908 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8909 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008910
8911 /*
8912		 * this loop exits only when either we are slower than
8913		 * the 'hard' speed limit or the system was IO-idle for
8914		 * a jiffy.
8915 * the system might be non-idle CPU-wise, but we only care
8916 * about not overloading the IO subsystem. (things like an
8917 * e2fsck being done on the RAID array should execute fast)
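		 *
		 * Worked example with made-up numbers: if 40960 sectors have
		 * completed 10 seconds after resync_mark, currspeed below is
		 * roughly 40960/2 / (10+1) + 1 = 1862 KB/sec.  With typical
		 * default limits (speed_min 1000, speed_max 200000) that is
		 * above the minimum, so the loop only backs off if currspeed
		 * exceeds speed_max (msleep and retry) or if is_mddev_idle()
		 * reports competing I/O.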
8918 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008919 cond_resched();
8920
Xiao Niac7e50a2014-08-07 09:37:41 -04008921 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8922 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
NeilBrown57afd892005-06-21 17:17:13 -07008923 /((jiffies-mddev->resync_mark)/HZ +1) +1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008924
NeilBrown88202a02006-01-06 00:21:36 -08008925 if (currspeed > speed_min(mddev)) {
NeilBrownac8fa412015-02-19 16:55:00 +11008926 if (currspeed > speed_max(mddev)) {
NeilBrownc0e48522005-11-18 01:11:01 -08008927 msleep(500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008928 goto repeat;
8929 }
NeilBrownac8fa412015-02-19 16:55:00 +11008930 if (!is_mddev_idle(mddev, 0)) {
8931 /*
8932 * Give other IO more of a chance.
8933 * The faster the devices, the less we wait.
8934 */
8935 wait_event(mddev->recovery_wait,
8936 !atomic_read(&mddev->recovery_active));
8937 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008938 }
8939 }
NeilBrown9d487392016-11-02 14:16:49 +11008940 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8941 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8942 ? "interrupted" : "done");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008943 /*
8944 * this also signals 'finished resyncing' to md_stop
8945 */
majianpeng7c2c57c2012-07-03 12:12:26 +10008946 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008947 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8948
NeilBrown5ed1df22015-07-24 13:27:08 +10008949 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8950 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008951 mddev->curr_resync > 3) {
NeilBrown5ed1df22015-07-24 13:27:08 +10008952 mddev->curr_resync_completed = mddev->curr_resync;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008953 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown5ed1df22015-07-24 13:27:08 +10008954 }
NeilBrown09314792015-02-19 16:04:40 +11008955 mddev->pers->sync_request(mddev, max_sectors, &skipped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008956
NeilBrowndfc70642008-05-23 13:04:39 -07008957 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008958 mddev->curr_resync > 3) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008959 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8960 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8961 if (mddev->curr_resync >= mddev->recovery_cp) {
NeilBrown9d487392016-11-02 14:16:49 +11008962 pr_debug("md: checkpointing %s of %s.\n",
8963 desc, mdname(mddev));
majianpeng0a19caa2012-11-19 19:57:34 +08008964 if (test_bit(MD_RECOVERY_ERROR,
8965 &mddev->recovery))
8966 mddev->recovery_cp =
8967 mddev->curr_resync_completed;
8968 else
8969 mddev->recovery_cp =
8970 mddev->curr_resync;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008971 }
8972 } else
8973 mddev->recovery_cp = MaxSector;
8974 } else {
8975 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8976 mddev->curr_resync = MaxSector;
NeilBrowndb0505d2017-10-17 16:18:36 +11008977 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8978 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
8979 rcu_read_lock();
8980 rdev_for_each_rcu(rdev, mddev)
8981 if (rdev->raid_disk >= 0 &&
8982 mddev->delta_disks >= 0 &&
8983 !test_bit(Journal, &rdev->flags) &&
8984 !test_bit(Faulty, &rdev->flags) &&
8985 !test_bit(In_sync, &rdev->flags) &&
8986 rdev->recovery_offset < mddev->curr_resync)
8987 rdev->recovery_offset = mddev->curr_resync;
8988 rcu_read_unlock();
8989 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008990 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008991 }
NeilBrowndb91ff52012-02-07 12:01:51 +11008992 skip:
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04008993	/* set CHANGE_PENDING here since another update may still be needed,
8994 * so other nodes are informed. It should be harmless for normal
8995 * raid */
Shaohua Li29530792016-12-08 15:48:19 -08008996 set_mask_bits(&mddev->sb_flags, 0,
8997 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05008998
BingJing Chang88763912018-02-22 13:34:46 +08008999 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9000 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9001 mddev->delta_disks > 0 &&
9002 mddev->pers->finish_reshape &&
9003 mddev->pers->size &&
9004 mddev->queue) {
9005 mddev_lock_nointr(mddev);
9006 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9007 mddev_unlock(mddev);
Christoph Hellwig2c247c52020-11-16 15:57:11 +01009008 if (!mddev_is_clustered(mddev))
9009 set_capacity_and_notify(mddev->gendisk,
9010 mddev->array_sectors);
BingJing Chang88763912018-02-22 13:34:46 +08009011 }
9012
NeilBrown23da4222014-12-15 12:57:01 +11009013 spin_lock(&mddev->lock);
NeilBrownc07b70a2009-12-14 12:49:48 +11009014 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9015 /* We completed so min/max setting can be forgotten if used. */
9016 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9017 mddev->resync_min = 0;
9018 mddev->resync_max = MaxSector;
9019 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9020 mddev->resync_min = mddev->curr_resync_completed;
NeilBrownf7851be2015-07-02 17:12:58 +10009021 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009022 mddev->curr_resync = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009023 spin_unlock(&mddev->lock);
9024
Linus Torvalds1da177e2005-04-16 15:20:36 -07009025 wake_up(&resync_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009026 md_wakeup_thread(mddev->thread);
NeilBrownc6207272008-02-06 01:39:52 -08009027 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009028}
NeilBrown29269552006-03-27 01:18:10 -08009029EXPORT_SYMBOL_GPL(md_do_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009030
NeilBrown746d3202013-04-24 11:42:41 +10009031static int remove_and_add_spares(struct mddev *mddev,
9032 struct md_rdev *this)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009033{
NeilBrown3cb03002011-10-11 16:45:26 +11009034 struct md_rdev *rdev;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009035 int spares = 0;
NeilBrownf2a371c2012-01-09 00:46:41 +11009036 int removed = 0;
NeilBrownd787be42016-06-02 16:19:53 +10009037 bool remove_some = false;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009038
NeilBrown39772f02018-02-03 09:19:30 +11009039 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9040 /* Mustn't remove devices when resync thread is running */
9041 return 0;
9042
NeilBrownd787be42016-06-02 16:19:53 +10009043 rdev_for_each(rdev, mddev) {
NeilBrown746d3202013-04-24 11:42:41 +10009044 if ((this == NULL || rdev == this) &&
9045 rdev->raid_disk >= 0 &&
Dan Williams6bfe0b42008-04-30 00:52:32 -07009046 !test_bit(Blocked, &rdev->flags) &&
NeilBrownd787be42016-06-02 16:19:53 +10009047 test_bit(Faulty, &rdev->flags) &&
9048 atomic_read(&rdev->nr_pending)==0) {
9049 /* Faulty non-Blocked devices with nr_pending == 0
9050 * never get nr_pending incremented,
9051 * never get Faulty cleared, and never get Blocked set.
9052 * So we can synchronize_rcu now rather than once per device
9053 */
9054 remove_some = true;
9055 set_bit(RemoveSynchronized, &rdev->flags);
9056 }
9057 }
9058
9059 if (remove_some)
9060 synchronize_rcu();
9061 rdev_for_each(rdev, mddev) {
9062 if ((this == NULL || rdev == this) &&
9063 rdev->raid_disk >= 0 &&
9064 !test_bit(Blocked, &rdev->flags) &&
9065 ((test_bit(RemoveSynchronized, &rdev->flags) ||
Shaohua Lif2076e72015-10-08 21:54:12 -07009066 (!test_bit(In_sync, &rdev->flags) &&
9067 !test_bit(Journal, &rdev->flags))) &&
NeilBrownd787be42016-06-02 16:19:53 +10009068 atomic_read(&rdev->nr_pending)==0)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009069 if (mddev->pers->hot_remove_disk(
NeilBrownb8321b62011-12-23 10:17:51 +11009070 mddev, rdev) == 0) {
Namhyung Kim36fad852011-07-27 11:00:36 +10009071 sysfs_unlink_rdev(mddev, rdev);
NeilBrown011abdc2018-04-26 14:46:29 +10009072 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009073 rdev->raid_disk = -1;
NeilBrownf2a371c2012-01-09 00:46:41 +11009074 removed++;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009075 }
9076 }
NeilBrownd787be42016-06-02 16:19:53 +10009077 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9078 clear_bit(RemoveSynchronized, &rdev->flags);
9079 }
9080
Jonathan Brassow90584fc2013-03-07 16:24:26 -06009081 if (removed && mddev->kobj.sd)
Junxiao Bie1a86db2020-07-14 16:10:26 -07009082 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009083
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009084 if (this && removed)
NeilBrown746d3202013-04-24 11:42:41 +10009085 goto no_add;
9086
NeilBrowndafb20f2012-03-19 12:46:39 +11009087 rdev_for_each(rdev, mddev) {
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009088 if (this && this != rdev)
9089 continue;
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009090 if (test_bit(Candidate, &rdev->flags))
9091 continue;
NeilBrown7bfec5f2011-12-23 10:17:53 +11009092 if (rdev->raid_disk >= 0 &&
9093 !test_bit(In_sync, &rdev->flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07009094 !test_bit(Journal, &rdev->flags) &&
NeilBrown7bfec5f2011-12-23 10:17:53 +11009095 !test_bit(Faulty, &rdev->flags))
9096 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009097 if (rdev->raid_disk >= 0)
9098 continue;
9099 if (test_bit(Faulty, &rdev->flags))
9100 continue;
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009101 if (!test_bit(Journal, &rdev->flags)) {
9102 if (mddev->ro &&
9103 ! (rdev->saved_raid_disk >= 0 &&
9104 !test_bit(Bitmap_sync, &rdev->flags)))
9105 continue;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009106
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009107 rdev->recovery_offset = 0;
9108 }
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02009109 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09009110 /* failure here is OK */
9111 sysfs_link_rdev(mddev, rdev);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009112 if (!test_bit(Journal, &rdev->flags))
9113 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009114 md_new_event(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08009115 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrowndfc70642008-05-23 13:04:39 -07009116 }
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009117 }
NeilBrown746d3202013-04-24 11:42:41 +10009118no_add:
NeilBrown6dafab62012-09-19 12:54:22 +10009119 if (removed)
Shaohua Li29530792016-12-08 15:48:19 -08009120 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009121 return spares;
9122}
NeilBrown7ebc0be2011-01-14 09:14:33 +11009123
NeilBrownac05f252014-09-30 08:10:42 +10009124static void md_start_sync(struct work_struct *ws)
9125{
9126 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009127
NeilBrownac05f252014-09-30 08:10:42 +10009128 mddev->sync_thread = md_register_thread(md_do_sync,
9129 mddev,
9130 "resync");
9131 if (!mddev->sync_thread) {
NeilBrown9d487392016-11-02 14:16:49 +11009132 pr_warn("%s: could not start resync thread...\n",
9133 mdname(mddev));
NeilBrownac05f252014-09-30 08:10:42 +10009134 /* leave the spares where they are, it shouldn't hurt */
9135 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9136 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9137 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9138 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9139 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009140 wake_up(&resync_wait);
NeilBrownac05f252014-09-30 08:10:42 +10009141 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9142 &mddev->recovery))
9143 if (mddev->sysfs_action)
9144 sysfs_notify_dirent_safe(mddev->sysfs_action);
9145 } else
9146 md_wakeup_thread(mddev->sync_thread);
9147 sysfs_notify_dirent_safe(mddev->sysfs_action);
9148 md_new_event(mddev);
9149}
9150
Linus Torvalds1da177e2005-04-16 15:20:36 -07009151/*
9152 * This routine is regularly called by all per-raid-array threads to
9153 * deal with generic issues like resync and super-block update.
9154 * Raid personalities that don't have a thread (linear/raid0) do not
9155 * need this as they never do any recovery or update the superblock.
9156 *
9157 * It does not do any resync itself, but rather "forks" off other threads
9158 * to do that as needed.
9159 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9160 * "->recovery" and create a thread at ->sync_thread.
NeilBrowndfc70642008-05-23 13:04:39 -07009161 * When the thread finishes it sets MD_RECOVERY_DONE
Linus Torvalds1da177e2005-04-16 15:20:36 -07009162 * and wakes up this thread, which will reap the thread and finish up.
9163 * This thread also removes any faulty devices (with nr_pending == 0).
9164 *
9165 * The overall approach is:
9166 * 1/ if the superblock needs updating, update it.
9167 * 2/ If a recovery thread is running, don't do anything else.
9168 * 3/ If recovery has finished, clean up, possibly marking spares active.
9169 * 4/ If there are any faulty devices, remove them.
9170 * 5/ If array is degraded, try to add spare devices
9171 * 6/ If array has spares or is not in-sync, start a resync thread.
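 *
 * Illustrative call site (a sketch of how a personality's thread
 * typically drives this; raidXd is a placeholder, not a verbatim
 * excerpt):
 *
 *	static void raidXd(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *
 *		md_check_recovery(mddev);
 *		handle queued and retried I/O for the array
 *	}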
9172 */
NeilBrownfd01b882011-10-11 16:47:53 +11009173void md_check_recovery(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009174{
NeilBrown059421e2018-10-03 15:04:41 +10009175 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9176 /* Write superblock - thread that called mddev_suspend()
9177 * holds reconfig_mutex for us.
9178 */
9179 set_bit(MD_UPDATING_SB, &mddev->flags);
9180 smp_mb__after_atomic();
9181 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9182 md_update_sb(mddev, 0);
9183 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9184 wake_up(&mddev->sb_wait);
9185 }
9186
Jonathan Brassow68866e42011-06-08 15:10:08 +10009187 if (mddev->suspended)
9188 return;
9189
NeilBrown5f404022005-06-21 17:17:16 -07009190 if (mddev->bitmap)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009191 md_bitmap_daemon_work(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009192
NeilBrownfca4d842005-06-21 17:17:11 -07009193 if (signal_pending(current)) {
NeilBrown31a59e32008-04-30 00:52:30 -07009194 if (mddev->pers->sync_request && !mddev->external) {
NeilBrown9d487392016-11-02 14:16:49 +11009195 pr_debug("md: %s in immediate safe mode\n",
9196 mdname(mddev));
NeilBrownfca4d842005-06-21 17:17:11 -07009197 mddev->safemode = 2;
9198 }
9199 flush_signals(current);
9200 }
9201
NeilBrownc89a8ee2008-08-05 15:54:13 +10009202 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9203 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009204 if ( ! (
Shaohua Li29530792016-12-08 15:48:19 -08009205 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07009206 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
NeilBrownfca4d842005-06-21 17:17:11 -07009207 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
NeilBrown31a59e32008-04-30 00:52:30 -07009208 (mddev->external == 0 && mddev->safemode == 1) ||
NeilBrown4ad23a972017-03-15 14:05:14 +11009209 (mddev->safemode == 2
NeilBrownfca4d842005-06-21 17:17:11 -07009210 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009211 ))
9212 return;
NeilBrownfca4d842005-06-21 17:17:11 -07009213
NeilBrowndf5b89b2006-03-27 01:18:20 -08009214 if (mddev_trylock(mddev)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009215 int spares = 0;
NeilBrown480523f2019-08-20 10:21:09 +10009216 bool try_set_sync = mddev->safemode != 0;
NeilBrownfca4d842005-06-21 17:17:11 -07009217
Shaohua Liafc1f552017-08-11 20:34:45 -07009218 if (!mddev->external && mddev->safemode == 1)
NeilBrown33182d12017-08-08 16:56:36 +10009219 mddev->safemode = 0;
9220
NeilBrownc89a8ee2008-08-05 15:54:13 +10009221 if (mddev->ro) {
Neil Brownab16bfc2015-06-17 12:31:46 +10009222 struct md_rdev *rdev;
9223 if (!mddev->external && mddev->in_sync)
9224 /* 'Blocked' flag not needed as failed devices
9225 * will be recorded if array switched to read/write.
9226 * Leaving it set will prevent the device
9227 * from being removed.
9228 */
9229 rdev_for_each(rdev, mddev)
9230 clear_bit(Blocked, &rdev->flags);
NeilBrown7ceb17e2013-04-24 11:42:42 +10009231 /* On a read-only array we can:
9232 * - remove failed devices
9233 * - add already-in_sync devices if the array itself
9234 * is in-sync.
9235 * As we only add devices that are already in-sync,
9236 * we can activate the spares immediately.
NeilBrownc89a8ee2008-08-05 15:54:13 +10009237 */
NeilBrown7ceb17e2013-04-24 11:42:42 +10009238 remove_and_add_spares(mddev, NULL);
NeilBrown8313b8e2013-12-12 10:13:33 +11009239 /* There is no thread, but we need to call
9240 * ->spare_active and clear saved_raid_disk
9241 */
NeilBrown2ac295a2014-05-29 11:40:03 +10009242 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009243 md_reap_sync_thread(mddev);
NeilBrowna4a3d262015-07-17 11:57:30 +10009244 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009245 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Shaohua Li29530792016-12-08 15:48:19 -08009246 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrownc89a8ee2008-08-05 15:54:13 +10009247 goto unlock;
9248 }
9249
Guoqing Jiang659b2542015-12-21 10:50:59 +11009250 if (mddev_is_clustered(mddev)) {
9251 struct md_rdev *rdev;
9252 /* kick the device if another node issued a
9253			 * remove-disk request.
9254 */
9255 rdev_for_each(rdev, mddev) {
9256 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9257 rdev->raid_disk < 0)
9258 md_kick_rdev_from_array(rdev);
9259 }
9260 }
9261
NeilBrown480523f2019-08-20 10:21:09 +10009262 if (try_set_sync && !mddev->external && !mddev->in_sync) {
NeilBrown85572d72014-12-15 12:56:56 +11009263 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11009264 set_in_sync(mddev);
NeilBrown85572d72014-12-15 12:56:56 +11009265 spin_unlock(&mddev->lock);
NeilBrownfca4d842005-06-21 17:17:11 -07009266 }
NeilBrownfca4d842005-06-21 17:17:11 -07009267
Shaohua Li29530792016-12-08 15:48:19 -08009268 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07009269 md_update_sb(mddev, 0);
NeilBrown06d91a52005-06-21 17:17:12 -07009270
Linus Torvalds1da177e2005-04-16 15:20:36 -07009271 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9272 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9273 /* resync/recovery still happening */
9274 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9275 goto unlock;
9276 }
9277 if (mddev->sync_thread) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009278 md_reap_sync_thread(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009279 goto unlock;
9280 }
Neil Brown72a23c22008-06-28 08:31:41 +10009281 /* Set RUNNING before clearing NEEDED to avoid
9282 * any transients in the value of "sync_action".
9283 */
NeilBrown72f36d52012-10-11 14:25:57 +11009284 mddev->curr_resync_completed = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009285 spin_lock(&mddev->lock);
Neil Brown72a23c22008-06-28 08:31:41 +10009286 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrown23da4222014-12-15 12:57:01 +11009287 spin_unlock(&mddev->lock);
NeilBrown24dd4692005-11-08 21:39:26 -08009288 /* Clear some bits that don't mean anything, but
9289 * might be left set
9290 */
NeilBrown24dd4692005-11-08 21:39:26 -08009291 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9292 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009293
NeilBrowned209582012-04-24 10:23:14 +10009294 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9295 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrownac05f252014-09-30 08:10:42 +10009296 goto not_running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009297 /* no recovery is running.
9298 * remove any failed drives, then
9299 * add spares if possible.
NeilBrown72f36d52012-10-11 14:25:57 +11009300 * Spares are also removed and re-added, to allow
Linus Torvalds1da177e2005-04-16 15:20:36 -07009301 * the personality to fail the re-add.
9302 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009303
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009304 if (mddev->reshape_position != MaxSector) {
NeilBrown50ac1682009-06-18 08:47:55 +10009305 if (mddev->pers->check_reshape == NULL ||
9306 mddev->pers->check_reshape(mddev) != 0)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009307 /* Cannot proceed */
NeilBrownac05f252014-09-30 08:10:42 +10009308 goto not_running;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009309 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009310 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown746d3202013-04-24 11:42:41 +10009311 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
NeilBrown24dd4692005-11-08 21:39:26 -08009312 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9313 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Dan Williams56ac36d2008-08-07 10:02:47 -07009314 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009315 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009316 } else if (mddev->recovery_cp < MaxSector) {
9317 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009318 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009319 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9320 /* nothing to be done ... */
NeilBrownac05f252014-09-30 08:10:42 +10009321 goto not_running;
NeilBrown24dd4692005-11-08 21:39:26 -08009322
Linus Torvalds1da177e2005-04-16 15:20:36 -07009323 if (mddev->pers->sync_request) {
NeilBrownef99bf42012-05-22 13:55:08 +10009324 if (spares) {
NeilBrowna654b9d82005-06-21 17:17:27 -07009325 /* We are adding a device or devices to an array
9326 * which has the bitmap stored on all devices.
9327 * So make sure all bitmap pages get written
9328 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009329 md_bitmap_write_all(mddev->bitmap);
NeilBrowna654b9d82005-06-21 17:17:27 -07009330 }
NeilBrownac05f252014-09-30 08:10:42 +10009331 INIT_WORK(&mddev->del_work, md_start_sync);
9332 queue_work(md_misc_wq, &mddev->del_work);
9333 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009334 }
NeilBrownac05f252014-09-30 08:10:42 +10009335 not_running:
Neil Brown72a23c22008-06-28 08:31:41 +10009336 if (!mddev->sync_thread) {
9337 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009338 wake_up(&resync_wait);
Neil Brown72a23c22008-06-28 08:31:41 +10009339 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9340 &mddev->recovery))
NeilBrown0c3573f2009-01-09 08:31:05 +11009341 if (mddev->sysfs_action)
NeilBrown00bcb4a2010-06-01 19:37:23 +10009342 sysfs_notify_dirent_safe(mddev->sysfs_action);
Neil Brown72a23c22008-06-28 08:31:41 +10009343 }
NeilBrownac05f252014-09-30 08:10:42 +10009344 unlock:
9345 wake_up(&mddev->sb_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009346 mddev_unlock(mddev);
9347 }
9348}
NeilBrown6c144d32014-09-30 16:15:38 +10009349EXPORT_SYMBOL(md_check_recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009350
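/*
 * Collect the results of a finished resync/recovery thread: unregister
 * the sync thread, activate spares on success, let the personality
 * finish a reshape, scrap saved_raid_disk info once the array is no
 * longer degraded, write out the superblocks and clear the recovery
 * state bits before waking anyone waiting on resync_wait.
 */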
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009351void md_reap_sync_thread(struct mddev *mddev)
9352{
9353 struct md_rdev *rdev;
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009354 sector_t old_dev_sectors = mddev->dev_sectors;
9355 bool is_reshaped = false;
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009356
9357 /* resync has finished, collect result */
9358 md_unregister_thread(&mddev->sync_thread);
9359 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
Guoqing Jiang0d8ed0e92019-07-24 11:09:21 +02009360 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9361 mddev->degraded != mddev->raid_disks) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009362 /* success...*/
9363 /* activate any spares */
9364 if (mddev->pers->spare_active(mddev)) {
Junxiao Bie1a86db2020-07-14 16:10:26 -07009365 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Shaohua Li29530792016-12-08 15:48:19 -08009366 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009367 }
9368 }
9369 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009370 mddev->pers->finish_reshape) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009371 mddev->pers->finish_reshape(mddev);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009372 if (mddev_is_clustered(mddev))
9373 is_reshaped = true;
9374 }
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009375
9376	/* If the array is no longer degraded, then any saved_raid_disk
NeilBrownf4667222013-12-09 12:04:56 +11009377 * information must be scrapped.
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009378 */
NeilBrownf4667222013-12-09 12:04:56 +11009379 if (!mddev->degraded)
9380 rdev_for_each(rdev, mddev)
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009381 rdev->saved_raid_disk = -1;
9382
9383 md_update_sb(mddev, 1);
Shaohua Li29530792016-12-08 15:48:19 -08009384 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009385 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9386 * clustered raid */
9387 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9388 md_cluster_ops->resync_finish(mddev);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009389 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownea358cd2015-06-12 20:05:04 +10009390 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009391 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9392 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9393 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9394 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009395 /*
9396 * We call md_cluster_ops->update_size here because sync_size could
9397 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9398 * so it is time to update size across cluster.
9399 */
9400 if (mddev_is_clustered(mddev) && is_reshaped
9401 && !test_bit(MD_CLOSING, &mddev->flags))
9402 md_cluster_ops->update_size(mddev, old_dev_sectors);
NeilBrownf851b602014-12-11 10:02:10 +11009403 wake_up(&resync_wait);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009404 /* flag recovery needed just to double check */
9405 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9406 sysfs_notify_dirent_safe(mddev->sysfs_action);
9407 md_new_event(mddev);
9408 if (mddev->event_work.func)
9409 queue_work(md_misc_wq, &mddev->event_work);
9410}
NeilBrown6c144d32014-09-30 16:15:38 +10009411EXPORT_SYMBOL(md_reap_sync_thread);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009412
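/*
 * Wait (up to five seconds) for a device to stop being Blocked or
 * BlockedBadBlocks, then drop the pending reference the caller holds.
 */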
NeilBrownfd01b882011-10-11 16:47:53 +11009413void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
Dan Williams6bfe0b42008-04-30 00:52:32 -07009414{
NeilBrown00bcb4a2010-06-01 19:37:23 +10009415 sysfs_notify_dirent_safe(rdev->sysfs_state);
Dan Williams6bfe0b42008-04-30 00:52:32 -07009416 wait_event_timeout(rdev->blocked_wait,
NeilBrownde393cd2011-07-28 11:31:48 +10009417 !test_bit(Blocked, &rdev->flags) &&
9418 !test_bit(BlockedBadBlocks, &rdev->flags),
Dan Williams6bfe0b42008-04-30 00:52:32 -07009419 msecs_to_jiffies(5000));
9420 rdev_dec_pending(rdev, mddev);
9421}
9422EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9423
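/*
 * Fold the shift between data_offset and new_data_offset back into each
 * rdev's usable size once a reshape has finished moving the data.
 */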
NeilBrownc6563a82012-05-21 09:27:00 +10009424void md_finish_reshape(struct mddev *mddev)
9425{
9426	/* called by the personality module when a reshape completes. */
9427 struct md_rdev *rdev;
9428
9429 rdev_for_each(rdev, mddev) {
9430 if (rdev->data_offset > rdev->new_data_offset)
9431 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9432 else
9433 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9434 rdev->data_offset = rdev->new_data_offset;
9435 }
9436}
9437EXPORT_SYMBOL(md_finish_reshape);
NeilBrown2230dfe2011-07-28 11:31:46 +10009438
Vishal Vermafc974ee2015-12-24 19:20:34 -07009439/* Bad block management */
NeilBrown2230dfe2011-07-28 11:31:46 +10009440
Vishal Vermafc974ee2015-12-24 19:20:34 -07009441/* Returns 1 on success, 0 on failure */
NeilBrown3cb03002011-10-11 16:45:26 +11009442int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
NeilBrownc6563a82012-05-21 09:27:00 +10009443 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009444{
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04009445 struct mddev *mddev = rdev->mddev;
NeilBrownc6563a82012-05-21 09:27:00 +10009446 int rv;
9447 if (is_new)
9448 s += rdev->new_data_offset;
9449 else
9450 s += rdev->data_offset;
Vishal Vermafc974ee2015-12-24 19:20:34 -07009451 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9452 if (rv == 0) {
NeilBrown2230dfe2011-07-28 11:31:46 +10009453 /* Make sure they get written out promptly */
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009454 if (test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009455 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
NeilBrown8bd2f0a2011-12-08 16:26:08 +11009456 sysfs_notify_dirent_safe(rdev->sysfs_state);
Shaohua Li29530792016-12-08 15:48:19 -08009457 set_mask_bits(&mddev->sb_flags, 0,
9458 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
NeilBrown2230dfe2011-07-28 11:31:46 +10009459 md_wakeup_thread(rdev->mddev->thread);
Vishal Vermafc974ee2015-12-24 19:20:34 -07009460 return 1;
9461 } else
9462 return 0;
NeilBrown2230dfe2011-07-28 11:31:46 +10009463}
9464EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9465
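/*
 * Remove a range from an rdev's bad block list, notifying sysfs readers
 * when the bad block list is managed externally.
 */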
NeilBrownc6563a82012-05-21 09:27:00 +10009466int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9467 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009468{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009469 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10009470 if (is_new)
9471 s += rdev->new_data_offset;
9472 else
9473 s += rdev->data_offset;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009474 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9475 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009476 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009477 return rv;
NeilBrown2230dfe2011-07-28 11:31:46 +10009478}
9479EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9480
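/*
 * Reboot notifier: stop writes on every array we can lock, then delay
 * briefly so the devices can settle before the system goes down.
 */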
Adrian Bunk75c96f82005-05-05 16:16:09 -07009481static int md_notify_reboot(struct notifier_block *this,
9482 unsigned long code, void *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009483{
9484 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11009485 struct mddev *mddev;
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009486 int need_delay = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009487
NeilBrownc744a652012-03-19 12:46:37 +11009488 for_each_mddev(mddev, tmp) {
9489 if (mddev_trylock(mddev)) {
NeilBrown30b8aa92012-04-24 10:23:16 +10009490 if (mddev->pers)
9491 __md_stop_writes(mddev);
NeilBrown0f62fb22014-05-06 09:36:08 +10009492 if (mddev->persistent)
9493 mddev->safemode = 2;
NeilBrownc744a652012-03-19 12:46:37 +11009494 mddev_unlock(mddev);
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009495 }
NeilBrownc744a652012-03-19 12:46:37 +11009496 need_delay = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497 }
NeilBrownc744a652012-03-19 12:46:37 +11009498 /*
9499 * certain more exotic SCSI devices are known to be
9500	 * volatile with respect to overly early system reboots. While the
9501	 * right place to handle this issue is the individual
9502 * driver, we do want to have a safe RAID driver ...
9503 */
9504 if (need_delay)
9505 mdelay(1000*1);
9506
Linus Torvalds1da177e2005-04-16 15:20:36 -07009507 return NOTIFY_DONE;
9508}
9509
Adrian Bunk75c96f82005-05-05 16:16:09 -07009510static struct notifier_block md_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009511 .notifier_call = md_notify_reboot,
9512 .next = NULL,
9513 .priority = INT_MAX, /* before any real devices */
9514};
9515
9516static void md_geninit(void)
9517{
NeilBrown36a4e1f2011-10-07 14:23:17 +11009518 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009519
Alexey Dobriyan97a32532020-02-03 17:37:17 -08009520 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009521}
9522
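/*
 * Module initialisation: create the md workqueues, register the "md"
 * and "mdp" block majors, the reboot notifier and the sysctl table,
 * and set up /proc/mdstat via md_geninit().
 */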
Adrian Bunk75c96f82005-05-05 16:16:09 -07009523static int __init md_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009524{
Tejun Heoe804ac72010-10-15 15:36:08 +02009525 int ret = -ENOMEM;
9526
Tejun Heoada609e2011-01-25 14:35:54 +01009527 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
Tejun Heoe804ac72010-10-15 15:36:08 +02009528 if (!md_wq)
9529 goto err_wq;
9530
9531 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9532 if (!md_misc_wq)
9533 goto err_misc_wq;
9534
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009535 md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
Guoqing Jiangcf0b9b42020-10-08 05:19:09 +02009536 if (!md_rdev_misc_wq)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009537 goto err_rdev_misc_wq;
9538
Christoph Hellwig28144f92020-10-29 15:58:34 +01009539 ret = __register_blkdev(MD_MAJOR, "md", md_probe);
9540 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009541 goto err_md;
9542
Christoph Hellwig28144f92020-10-29 15:58:34 +01009543 ret = __register_blkdev(0, "mdp", md_probe);
9544 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009545 goto err_mdp;
9546 mdp_major = ret;
9547
Linus Torvalds1da177e2005-04-16 15:20:36 -07009548 register_reboot_notifier(&md_notifier);
Eric W. Biederman0b4d4142007-02-14 00:34:09 -08009549 raid_table_header = register_sysctl_table(raid_root_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009550
9551 md_geninit();
NeilBrownd710e132008-10-13 11:55:12 +11009552 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009553
Tejun Heoe804ac72010-10-15 15:36:08 +02009554err_mdp:
9555 unregister_blkdev(MD_MAJOR, "md");
9556err_md:
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009557 destroy_workqueue(md_rdev_misc_wq);
9558err_rdev_misc_wq:
Tejun Heoe804ac72010-10-15 15:36:08 +02009559 destroy_workqueue(md_misc_wq);
9560err_misc_wq:
9561 destroy_workqueue(md_wq);
9562err_wq:
9563 return ret;
9564}
Linus Torvalds1da177e2005-04-16 15:20:36 -07009565
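/*
 * Apply superblock changes made by another cluster node: resize the
 * array if its size changed, update device roles (kicking failed
 * candidate devices, activating spares, marking devices faulty),
 * adjust raid_disks and any in-progress reshape, and finally bring
 * mddev->events up to date.
 */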
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009566static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009567{
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009568 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9569 struct md_rdev *rdev2;
9570 int role, ret;
9571 char b[BDEVNAME_SIZE];
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009572
Guoqing Jiang818da592017-03-01 16:42:40 +08009573 /*
9574	 * If the size was changed on another node, then we need to
9575	 * resize here as well.
9576 */
9577 if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9578 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9579 if (ret)
9580 pr_info("md-cluster: resize failed\n");
9581 else
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009582 md_bitmap_update_sb(mddev->bitmap);
Guoqing Jiang818da592017-03-01 16:42:40 +08009583 }
9584
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009585 /* Check for change of roles in the active devices */
9586 rdev_for_each(rdev2, mddev) {
9587 if (test_bit(Faulty, &rdev2->flags))
9588 continue;
9589
9590 /* Check if the roles changed */
9591 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009592
9593 if (test_bit(Candidate, &rdev2->flags)) {
9594 if (role == 0xfffe) {
9595 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9596 md_kick_rdev_from_array(rdev2);
9597 continue;
9598 }
9599 else
9600 clear_bit(Candidate, &rdev2->flags);
9601 }
9602
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009603 if (role != rdev2->raid_disk) {
Guoqing Jiangca1e98e2018-10-18 16:37:45 +08009604 /*
9605			 * got activated on another node, unless a reshape is happening.
9606 */
9607 if (rdev2->raid_disk == -1 && role != 0xffff &&
9608 !(le32_to_cpu(sb->feature_map) &
9609 MD_FEATURE_RESHAPE_ACTIVE)) {
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009610 rdev2->saved_raid_disk = role;
9611 ret = remove_and_add_spares(mddev, rdev2);
9612 pr_info("Activated spare: %s\n",
NeilBrown9d487392016-11-02 14:16:49 +11009613 bdevname(rdev2->bdev,b));
Guoqing Jianga5781832016-05-02 11:33:14 -04009614				/* wake up mddev->thread here, so the array can
9615				 * resync with the newly activated disk */
9616 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9617 md_wakeup_thread(mddev->thread);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009618 }
9619 /* device faulty
9620 * We just want to do the minimum to mark the disk
9621 * as faulty. The recovery is performed by the
9622			 * node that initiated the error.
9623 */
9624 if ((role == 0xfffe) || (role == 0xfffd)) {
9625 md_error(mddev, rdev2);
9626 clear_bit(Blocked, &rdev2->flags);
9627 }
9628 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009629 }
9630
Zhao Heminga8da01f2020-11-19 19:41:33 +08009631 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9632 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9633 if (ret)
9634 pr_warn("md: updating array disks failed. %d\n", ret);
9635 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009636
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009637 /*
9638	 * Since mddev->delta_disks has already been updated in
9639	 * update_raid_disks, it is time to check for a reshape.
9640 */
9641 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9642 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9643 /*
9644	 * A reshape is happening on the remote node, so we need to
9645 * update reshape_position and call start_reshape.
9646 */
Christoph Hellwiged4d0a4e2019-04-04 18:56:10 +02009647 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009648 if (mddev->pers->update_reshape_pos)
9649 mddev->pers->update_reshape_pos(mddev);
9650 if (mddev->pers->start_reshape)
9651 mddev->pers->start_reshape(mddev);
9652 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9653 mddev->reshape_position != MaxSector &&
9654 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9655 /* reshape is just done in another node. */
9656 mddev->reshape_position = MaxSector;
9657 if (mddev->pers->update_reshape_pos)
9658 mddev->pers->update_reshape_pos(mddev);
9659 }
9660
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009661 /* Finally set the event to be up to date */
9662 mddev->events = le64_to_cpu(sb->events);
9663}
9664
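/*
 * Re-read the superblock of a single rdev from disk, restoring the old
 * in-memory copy if the reload fails.
 */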
9665static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9666{
9667 int err;
9668 struct page *swapout = rdev->sb_page;
9669 struct mdp_superblock_1 *sb;
9670
9671 /* Store the sb page of the rdev in the swapout temporary
9672	 * variable in case we hit an error later
9673 */
9674 rdev->sb_page = NULL;
NeilBrown7f0f0d82016-11-02 14:16:49 +11009675 err = alloc_disk_sb(rdev);
9676 if (err == 0) {
9677 ClearPageUptodate(rdev->sb_page);
9678 rdev->sb_loaded = 0;
9679 err = super_types[mddev->major_version].
9680 load_super(rdev, NULL, mddev->minor_version);
9681 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009682 if (err < 0) {
9683 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9684 __func__, __LINE__, rdev->desc_nr, err);
NeilBrown7f0f0d82016-11-02 14:16:49 +11009685 if (rdev->sb_page)
9686 put_page(rdev->sb_page);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009687 rdev->sb_page = swapout;
9688 rdev->sb_loaded = 1;
9689 return err;
9690 }
9691
9692 sb = page_address(rdev->sb_page);
9693 /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
9694 * is not set
9695 */
9696
9697 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9698 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9699
9700	/* The other node finished recovery; call spare_active to mark
9701	 * the device In_sync and update mddev->degraded
9702 */
9703 if (rdev->recovery_offset == MaxSector &&
9704 !test_bit(In_sync, &rdev->flags) &&
9705 mddev->pers->spare_active(mddev))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009706 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009707
9708 put_page(swapout);
9709 return 0;
9710}
9711
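/*
 * Reload the superblock of device 'nr' (typically after another cluster
 * node updated the metadata), apply any changes to the array, then
 * refresh recovery_offset on all working devices.
 */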
9712void md_reload_sb(struct mddev *mddev, int nr)
9713{
9714 struct md_rdev *rdev;
9715 int err;
9716
9717 /* Find the rdev */
9718 rdev_for_each_rcu(rdev, mddev) {
9719 if (rdev->desc_nr == nr)
9720 break;
9721 }
9722
9723 if (!rdev || rdev->desc_nr != nr) {
9724 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9725 return;
9726 }
9727
9728 err = read_rdev(mddev, rdev);
9729 if (err < 0)
9730 return;
9731
9732 check_sb_changes(mddev, rdev);
9733
9734 /* Read all rdev's to update recovery_offset */
Guoqing Jiang0ea99242018-04-09 17:01:21 +08009735 rdev_for_each_rcu(rdev, mddev) {
9736 if (!test_bit(Faulty, &rdev->flags))
9737 read_rdev(mddev, rdev);
9738 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009739}
9740EXPORT_SYMBOL(md_reload_sb);
9741
Linus Torvalds1da177e2005-04-16 15:20:36 -07009742#ifndef MODULE
9743
9744/*
9745 * Searches all registered partitions for autorun RAID arrays
9746 * at boot time.
9747 */
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009748
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009749static DEFINE_MUTEX(detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009750static LIST_HEAD(all_detected_devices);
9751struct detected_devices_node {
9752 struct list_head list;
9753 dev_t dev;
9754};
Linus Torvalds1da177e2005-04-16 15:20:36 -07009755
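/*
 * Record a partition detected at boot so md_autostart_arrays() can try
 * to assemble it later.
 */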
9756void md_autodetect_dev(dev_t dev)
9757{
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009758 struct detected_devices_node *node_detected_dev;
9759
9760 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9761 if (node_detected_dev) {
9762 node_detected_dev->dev = dev;
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009763 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009764 list_add_tail(&node_detected_dev->list, &all_detected_devices);
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009765 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009767}
9768
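/*
 * Import every device recorded by md_autodetect_dev() and hand the
 * resulting list to autorun_devices() for assembly.
 */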
Christoph Hellwigd82fa812020-06-06 15:00:24 +02009769void md_autostart_arrays(int part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009770{
NeilBrown3cb03002011-10-11 16:45:26 +11009771 struct md_rdev *rdev;
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009772 struct detected_devices_node *node_detected_dev;
9773 dev_t dev;
9774 int i_scanned, i_passed;
9775
9776 i_scanned = 0;
9777 i_passed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009778
NeilBrown9d487392016-11-02 14:16:49 +11009779 pr_info("md: Autodetecting RAID arrays.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009780
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009781 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009782 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9783 i_scanned++;
9784 node_detected_dev = list_entry(all_detected_devices.next,
9785 struct detected_devices_node, list);
9786 list_del(&node_detected_dev->list);
9787 dev = node_detected_dev->dev;
9788 kfree(node_detected_dev);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009789 mutex_unlock(&detected_devices_mutex);
NeilBrowndf968c42007-07-17 04:06:11 -07009790 rdev = md_import_device(dev,0, 90);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009791 mutex_lock(&detected_devices_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009792 if (IS_ERR(rdev))
9793 continue;
9794
NeilBrown403df472014-09-30 15:52:29 +10009795 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009796 continue;
NeilBrown403df472014-09-30 15:52:29 +10009797
NeilBrownd0fae182008-03-04 14:29:31 -08009798 set_bit(AutoDetected, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009799 list_add(&rdev->same_set, &pending_raid_disks);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009800 i_passed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009801 }
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009802 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009803
NeilBrown9d487392016-11-02 14:16:49 +11009804 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009805
9806 autorun_devices(part);
9807}
9808
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08009809#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009810
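/*
 * Module unload: unregister the block majors and notifiers, wake up
 * anyone polling /proc/mdstat, tear down every remaining array and
 * destroy the workqueues.
 */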
9811static __exit void md_exit(void)
9812{
NeilBrownfd01b882011-10-11 16:47:53 +11009813 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009814 struct list_head *tmp;
NeilBrowne2f23b62014-04-09 14:33:51 +10009815 int delay = 1;
Greg Kroah-Hartman8ab5e4c2005-06-20 21:15:16 -07009816
Christoph Hellwig3dbd8c22009-03-31 14:27:02 +11009817 unregister_blkdev(MD_MAJOR,"md");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009818 unregister_blkdev(mdp_major, "mdp");
9819 unregister_reboot_notifier(&md_notifier);
9820 unregister_sysctl_table(raid_table_header);
NeilBrowne2f23b62014-04-09 14:33:51 +10009821
9822 /* We cannot unload the modules while some process is
9823 * waiting for us in select() or poll() - wake them up
9824 */
9825 md_unloading = 1;
9826 while (waitqueue_active(&md_event_waiters)) {
9827 /* not safe to leave yet */
9828 wake_up(&md_event_waiters);
9829 msleep(delay);
9830 delay += delay;
9831 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009832 remove_proc_entry("mdstat", NULL);
NeilBrowne2f23b62014-04-09 14:33:51 +10009833
NeilBrown29ac4aa2008-02-06 01:39:58 -08009834 for_each_mddev(mddev, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009835 export_array(mddev);
NeilBrown93568632017-02-06 13:41:39 +11009836 mddev->ctime = 0;
NeilBrownd3374822009-01-09 08:31:10 +11009837 mddev->hold_active = 0;
NeilBrown93568632017-02-06 13:41:39 +11009838 /*
9839 * for_each_mddev() will call mddev_put() at the end of each
9840 * iteration. As the mddev is now fully clear, this will
9841 * schedule the mddev for destruction by a workqueue, and the
9842 * destroy_workqueue() below will wait for that to complete.
9843 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009844 }
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009845 destroy_workqueue(md_rdev_misc_wq);
Tejun Heoe804ac72010-10-15 15:36:08 +02009846 destroy_workqueue(md_misc_wq);
9847 destroy_workqueue(md_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009848}
9849
Dan Williams685784a2007-07-09 11:56:42 -07009850subsys_initcall(md_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009851module_exit(md_exit)
9852
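/* Module parameter handlers for the "start_ro" flag. */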
Kees Cooke4dca7b2017-10-17 19:04:42 -07009853static int get_ro(char *buffer, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009854{
Xiongfeng Wang3f999802020-05-11 16:23:25 +08009855 return sprintf(buffer, "%d\n", start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009856}
Kees Cooke4dca7b2017-10-17 19:04:42 -07009857static int set_ro(const char *val, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009858{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03009859 return kstrtouint(val, 10, (unsigned int *)&start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009860}
9861
NeilBrown80ca3a42006-07-10 04:44:18 -07009862module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9863module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
NeilBrownefeb53c2009-01-09 08:31:10 +11009864module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
NeilBrown78b63502017-04-12 16:26:13 +10009865module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
NeilBrownf91de922005-11-08 21:39:36 -08009866
Linus Torvalds1da177e2005-04-16 15:20:36 -07009867MODULE_LICENSE("GPL");
NeilBrown0efb9e62009-12-14 12:49:58 +11009868MODULE_DESCRIPTION("MD RAID framework");
NeilBrownaa1595e2005-08-04 12:53:32 -07009869MODULE_ALIAS("md");
NeilBrown72008652005-08-26 18:34:15 -07009870MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);