blob: 946bbedf5dcf7971b87281ee9927ed40cd86c47c [file] [log] [blame]
Thomas Gleixneraf1a8892019-05-20 19:08:12 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 md.c : Multiple Devices driver for Linux
NeilBrownf72ffdd2014-09-30 14:23:59 +10004 Copyright (C) 1998, 1999, 2000 Ingo Molnar
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 completely rewritten, based on the MD driver code from Marc Zyngier
7
8 Changes:
9
10 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13 - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14 - kmod support by: Cyrus Durgin
15 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17
18 - lots of fixes and improvements to the RAID1/RAID5 and generic
19 RAID code (such as request based resynchronization):
20
21 Neil Brown <neilb@cse.unsw.edu.au>.
22
NeilBrown32a76272005-06-21 17:17:14 -070023 - persistent bitmap code
24 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25
NeilBrown9d487392016-11-02 14:16:49 +110026
27 Errors, Warnings, etc.
28 Please use:
29 pr_crit() for error conditions that risk data loss
30 pr_err() for error conditions that are unexpected, like an IO error
31 or internal inconsistency
32 pr_warn() for error conditions that could have been predicated, like
33 adding a device to an array when it has incompatible metadata
34 pr_info() for every interesting, very rare events, like an array starting
35 or stopping, or resync starting or stopping
36 pr_debug() for everything else.
37
Linus Torvalds1da177e2005-04-16 15:20:36 -070038*/
39
Guoqing Jiang963c5552019-06-14 17:10:36 +080040#include <linux/sched/mm.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010041#include <linux/sched/signal.h>
NeilBrowna6fb0932005-09-09 16:23:56 -070042#include <linux/kthread.h>
NeilBrownbff61972009-03-31 14:33:13 +110043#include <linux/blkdev.h>
Christoph Hellwigfe45e632021-09-20 14:33:27 +020044#include <linux/blk-integrity.h>
Vishal Vermafc974ee2015-12-24 19:20:34 -070045#include <linux/badblocks.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <linux/sysctl.h>
NeilBrownbff61972009-03-31 14:33:13 +110047#include <linux/seq_file.h>
Al Viroff01bb42011-09-16 02:31:11 -040048#include <linux/fs.h>
NeilBrownd7603b72006-01-06 00:20:30 -080049#include <linux/poll.h>
NeilBrown16f17b32006-06-26 00:27:37 -070050#include <linux/ctype.h>
André Goddard Rosae7d28602009-12-14 18:01:06 -080051#include <linux/string.h>
NeilBrownfb4d8c72008-10-13 11:55:12 +110052#include <linux/hdreg.h>
53#include <linux/proc_fs.h>
54#include <linux/random.h>
Christoph Hellwigb81e0c22021-09-20 14:33:25 +020055#include <linux/major.h>
Paul Gortmaker056075c2011-07-03 13:58:33 -040056#include <linux/module.h>
NeilBrownfb4d8c72008-10-13 11:55:12 +110057#include <linux/reboot.h>
NeilBrown32a76272005-06-21 17:17:14 -070058#include <linux/file.h>
Arnd Bergmannaa98aa32009-12-14 12:50:05 +110059#include <linux/compat.h>
Stephen Rothwell25570722008-10-15 09:09:21 +110060#include <linux/delay.h>
NeilBrownbff61972009-03-31 14:33:13 +110061#include <linux/raid/md_p.h>
62#include <linux/raid/md_u.h>
Christoph Hellwig74cc979c2020-03-24 08:25:19 +010063#include <linux/raid/detect.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090064#include <linux/slab.h>
NeilBrown4ad23a972017-03-15 14:05:14 +110065#include <linux/percpu-refcount.h>
Christoph Hellwigc6a564ff2020-03-25 16:48:42 +010066#include <linux/part_stat.h>
NeilBrown4ad23a972017-03-15 14:05:14 +110067
Shaohua Li504634f2016-11-18 09:44:08 -080068#include <trace/events/block.h>
NeilBrown43b2e5d2009-03-31 14:33:13 +110069#include "md.h"
Mike Snitzer935fe092017-10-10 17:02:41 -040070#include "md-bitmap.h"
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -050071#include "md-cluster.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070072
NeilBrown01f96c02011-09-21 15:30:20 +100073/* pers_list is a list of registered personalities protected
74 * by pers_lock.
75 * pers_lock does extra service to protect accesses to
76 * mddev->thread when the mutex cannot be held.
77 */
NeilBrown2604b702006-01-06 00:20:36 -080078static LIST_HEAD(pers_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079static DEFINE_SPINLOCK(pers_lock);
80
Kent Overstreet28dec872018-06-07 20:52:54 -040081static struct kobj_type md_ktype;
82
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -050083struct md_cluster_operations *md_cluster_ops;
Goldwyn Rodrigues589a1c42014-06-07 02:39:37 -050084EXPORT_SYMBOL(md_cluster_ops);
Christoph Hellwig2b598ee2019-04-04 18:56:14 +020085static struct module *md_cluster_mod;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -050086
Bernd Schubert90b08712008-05-23 13:04:38 -070087static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
Tejun Heoe804ac72010-10-15 15:36:08 +020088static struct workqueue_struct *md_wq;
89static struct workqueue_struct *md_misc_wq;
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +020090static struct workqueue_struct *md_rdev_misc_wq;
Bernd Schubert90b08712008-05-23 13:04:38 -070091
NeilBrown746d3202013-04-24 11:42:41 +100092static int remove_and_add_spares(struct mddev *mddev,
93 struct md_rdev *this);
NeilBrown5aa61f42014-12-15 12:56:57 +110094static void mddev_detach(struct mddev *mddev);
NeilBrown746d3202013-04-24 11:42:41 +100095
Linus Torvalds1da177e2005-04-16 15:20:36 -070096/*
Robert Becker1e509152009-12-14 12:49:58 +110097 * Default number of read corrections we'll attempt on an rdev
98 * before ejecting it from the array. We divide the read error
99 * count by 2 for every hour elapsed between read errors.
100 */
101#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
Zhao Heming7c9d5c52020-07-21 02:08:52 +0800102/* Default safemode delay: 200 msec */
103#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
Robert Becker1e509152009-12-14 12:49:58 +1100104/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
106 * is 1000 KB/sec, so the extra system load does not show up that much.
107 * Increase it if you want to have more _guaranteed_ speed. Note that
Adrian Bunk338cec32005-09-10 00:26:54 -0700108 * the RAID driver will use the maximum available bandwidth if the IO
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 * subsystem is idle. There is also an 'absolute maximum' reconstruction
110 * speed limit - in case reconstruction slows down your system despite
111 * idle IO detection.
112 *
113 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
NeilBrown88202a02006-01-06 00:21:36 -0800114 * or /sys/block/mdX/md/sync_speed_{min,max}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115 */
116
117static int sysctl_speed_limit_min = 1000;
118static int sysctl_speed_limit_max = 200000;
NeilBrownfd01b882011-10-11 16:47:53 +1100119static inline int speed_min(struct mddev *mddev)
NeilBrown88202a02006-01-06 00:21:36 -0800120{
121 return mddev->sync_speed_min ?
122 mddev->sync_speed_min : sysctl_speed_limit_min;
123}
124
NeilBrownfd01b882011-10-11 16:47:53 +1100125static inline int speed_max(struct mddev *mddev)
NeilBrown88202a02006-01-06 00:21:36 -0800126{
127 return mddev->sync_speed_max ?
128 mddev->sync_speed_max : sysctl_speed_limit_max;
129}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100131static void rdev_uninit_serial(struct md_rdev *rdev)
Guoqing Jiang3e148a32019-06-19 17:30:46 +0800132{
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100133 if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
134 return;
Guoqing Jiang3e148a32019-06-19 17:30:46 +0800135
Guoqing Jiang025471f2019-12-23 10:49:01 +0100136 kvfree(rdev->serial);
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100137 rdev->serial = NULL;
Guoqing Jiang3e148a32019-06-19 17:30:46 +0800138}
139
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100140static void rdevs_uninit_serial(struct mddev *mddev)
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100141{
142 struct md_rdev *rdev;
143
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100144 rdev_for_each(rdev, mddev)
145 rdev_uninit_serial(rdev);
146}
147
148static int rdev_init_serial(struct md_rdev *rdev)
149{
Guoqing Jiang025471f2019-12-23 10:49:01 +0100150 /* serial_nums equals with BARRIER_BUCKETS_NR */
151 int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100152 struct serial_in_rdev *serial = NULL;
153
154 if (test_bit(CollisionCheck, &rdev->flags))
155 return 0;
156
Guoqing Jiang025471f2019-12-23 10:49:01 +0100157 serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
158 GFP_KERNEL);
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100159 if (!serial)
160 return -ENOMEM;
161
Guoqing Jiang025471f2019-12-23 10:49:01 +0100162 for (i = 0; i < serial_nums; i++) {
163 struct serial_in_rdev *serial_tmp = &serial[i];
164
165 spin_lock_init(&serial_tmp->serial_lock);
166 serial_tmp->serial_rb = RB_ROOT_CACHED;
167 init_waitqueue_head(&serial_tmp->serial_io_wait);
168 }
169
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100170 rdev->serial = serial;
171 set_bit(CollisionCheck, &rdev->flags);
172
173 return 0;
174}
175
176static int rdevs_init_serial(struct mddev *mddev)
177{
178 struct md_rdev *rdev;
179 int ret = 0;
180
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100181 rdev_for_each(rdev, mddev) {
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100182 ret = rdev_init_serial(rdev);
183 if (ret)
184 break;
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100185 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100186
187 /* Free all resources if pool is not existed */
188 if (ret && !mddev->serial_info_pool)
189 rdevs_uninit_serial(mddev);
190
191 return ret;
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100192}
193
Guoqing Jiang963c5552019-06-14 17:10:36 +0800194/*
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100195 * rdev needs to enable serial stuffs if it meets the conditions:
196 * 1. it is multi-queue device flaged with writemostly.
197 * 2. the write-behind mode is enabled.
198 */
199static int rdev_need_serial(struct md_rdev *rdev)
200{
201 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
Christoph Hellwige556f6b2020-06-26 10:01:56 +0200202 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100203 test_bit(WriteMostly, &rdev->flags));
204}
205
206/*
207 * Init resource for rdev(s), then create serial_info_pool if:
208 * 1. rdev is the first device which return true from rdev_enable_serial.
209 * 2. rdev is NULL, means we want to enable serialization for all rdevs.
Guoqing Jiang963c5552019-06-14 17:10:36 +0800210 */
Guoqing Jiang404659c2019-12-23 10:48:53 +0100211void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100212 bool is_suspend)
Guoqing Jiang963c5552019-06-14 17:10:36 +0800213{
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100214 int ret = 0;
215
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100216 if (rdev && !rdev_need_serial(rdev) &&
217 !test_bit(CollisionCheck, &rdev->flags))
Guoqing Jiang963c5552019-06-14 17:10:36 +0800218 return;
219
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100220 if (!is_suspend)
221 mddev_suspend(mddev);
222
223 if (!rdev)
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100224 ret = rdevs_init_serial(mddev);
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100225 else
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100226 ret = rdev_init_serial(rdev);
227 if (ret)
228 goto abort;
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100229
Guoqing Jiang404659c2019-12-23 10:48:53 +0100230 if (mddev->serial_info_pool == NULL) {
Coly Li3024ba22020-04-09 22:17:23 +0800231 /*
232 * already in memalloc noio context by
233 * mddev_suspend()
234 */
Guoqing Jiang404659c2019-12-23 10:48:53 +0100235 mddev->serial_info_pool =
236 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
237 sizeof(struct serial_info));
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100238 if (!mddev->serial_info_pool) {
239 rdevs_uninit_serial(mddev);
Guoqing Jiang404659c2019-12-23 10:48:53 +0100240 pr_err("can't alloc memory pool for serialization\n");
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100241 }
Guoqing Jiang963c5552019-06-14 17:10:36 +0800242 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100243
244abort:
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100245 if (!is_suspend)
246 mddev_resume(mddev);
Guoqing Jiang963c5552019-06-14 17:10:36 +0800247}
Guoqing Jiang963c5552019-06-14 17:10:36 +0800248
249/*
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100250 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
251 * 1. rdev is the last device flaged with CollisionCheck.
252 * 2. when bitmap is destroyed while policy is not enabled.
253 * 3. for disable policy, the pool is destroyed only when no rdev needs it.
Guoqing Jiang963c5552019-06-14 17:10:36 +0800254 */
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100255void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
256 bool is_suspend)
Guoqing Jiang963c5552019-06-14 17:10:36 +0800257{
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100258 if (rdev && !test_bit(CollisionCheck, &rdev->flags))
Guoqing Jiang963c5552019-06-14 17:10:36 +0800259 return;
260
Guoqing Jiang404659c2019-12-23 10:48:53 +0100261 if (mddev->serial_info_pool) {
Guoqing Jiang963c5552019-06-14 17:10:36 +0800262 struct md_rdev *temp;
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100263 int num = 0; /* used to track if other rdevs need the pool */
Guoqing Jiang963c5552019-06-14 17:10:36 +0800264
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100265 if (!is_suspend)
266 mddev_suspend(mddev);
267 rdev_for_each(temp, mddev) {
268 if (!rdev) {
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100269 if (!mddev->serialize_policy ||
270 !rdev_need_serial(temp))
271 rdev_uninit_serial(temp);
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100272 else
273 num++;
274 } else if (temp != rdev &&
275 test_bit(CollisionCheck, &temp->flags))
Guoqing Jiang963c5552019-06-14 17:10:36 +0800276 num++;
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100277 }
278
279 if (rdev)
Guoqing Jiang69b00b52019-12-23 10:49:00 +0100280 rdev_uninit_serial(rdev);
Guoqing Jiangde31ee92019-12-23 10:48:57 +0100281
282 if (num)
283 pr_info("The mempool could be used by other devices\n");
284 else {
Guoqing Jiang404659c2019-12-23 10:48:53 +0100285 mempool_destroy(mddev->serial_info_pool);
286 mddev->serial_info_pool = NULL;
Guoqing Jiang963c5552019-06-14 17:10:36 +0800287 }
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +0100288 if (!is_suspend)
289 mddev_resume(mddev);
Guoqing Jiang963c5552019-06-14 17:10:36 +0800290 }
291}
292
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293static struct ctl_table_header *raid_table_header;
294
Joe Perches82592c32013-11-14 15:16:18 +1100295static struct ctl_table raid_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297 .procname = "speed_limit_min",
298 .data = &sysctl_speed_limit_min,
299 .maxlen = sizeof(int),
NeilBrown80ca3a42006-07-10 04:44:18 -0700300 .mode = S_IRUGO|S_IWUSR,
Eric W. Biederman6d456112009-11-16 03:11:48 -0800301 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302 },
303 {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304 .procname = "speed_limit_max",
305 .data = &sysctl_speed_limit_max,
306 .maxlen = sizeof(int),
NeilBrown80ca3a42006-07-10 04:44:18 -0700307 .mode = S_IRUGO|S_IWUSR,
Eric W. Biederman6d456112009-11-16 03:11:48 -0800308 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 },
Eric W. Biederman894d2492009-11-05 14:34:02 -0800310 { }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311};
312
Joe Perches82592c32013-11-14 15:16:18 +1100313static struct ctl_table raid_dir_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314 {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315 .procname = "raid",
316 .maxlen = 0,
NeilBrown80ca3a42006-07-10 04:44:18 -0700317 .mode = S_IRUGO|S_IXUGO,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 .child = raid_table,
319 },
Eric W. Biederman894d2492009-11-05 14:34:02 -0800320 { }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321};
322
Joe Perches82592c32013-11-14 15:16:18 +1100323static struct ctl_table raid_root_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324 {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 .procname = "dev",
326 .maxlen = 0,
327 .mode = 0555,
328 .child = raid_dir_table,
329 },
Eric W. Biederman894d2492009-11-05 14:34:02 -0800330 { }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331};
332
NeilBrownf91de922005-11-08 21:39:36 -0800333static int start_readonly;
334
NeilBrown78b63502017-04-12 16:26:13 +1000335/*
336 * The original mechanism for creating an md device is to create
337 * a device node in /dev and to open it. This causes races with device-close.
338 * The preferred method is to write to the "new_array" module parameter.
339 * This can avoid races.
340 * Setting create_on_open to false disables the original mechanism
341 * so all the races disappear.
342 */
343static bool create_on_open = true;
344
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345/*
NeilBrownd7603b72006-01-06 00:20:30 -0800346 * We have a system wide 'event count' that is incremented
347 * on any 'interesting' event, and readers of /proc/mdstat
348 * can use 'poll' or 'select' to find out when the event
349 * count increases.
350 *
351 * Events are:
352 * start array, stop array, error, add device, remove device,
353 * start build, activate spare
354 */
NeilBrown2989ddb2006-01-06 00:20:43 -0800355static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
NeilBrownd7603b72006-01-06 00:20:30 -0800356static atomic_t md_event_count;
NeilBrownfd01b882011-10-11 16:47:53 +1100357void md_new_event(struct mddev *mddev)
NeilBrownd7603b72006-01-06 00:20:30 -0800358{
359 atomic_inc(&md_event_count);
360 wake_up(&md_event_waiters);
361}
NeilBrown29269552006-03-27 01:18:10 -0800362EXPORT_SYMBOL_GPL(md_new_event);
NeilBrownd7603b72006-01-06 00:20:30 -0800363
364/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 * Enables to iterate over all existing md arrays
366 * all_mddevs_lock protects this list.
367 */
368static LIST_HEAD(all_mddevs);
369static DEFINE_SPINLOCK(all_mddevs_lock);
370
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371/*
372 * iterates through all used mddevs in the system.
373 * We take care to grab the all_mddevs_lock whenever navigating
374 * the list, and to always hold a refcount when unlocked.
375 * Any code which breaks out of this loop while own
376 * a reference to the current mddev and must mddev_put it.
377 */
NeilBrownfd01b882011-10-11 16:47:53 +1100378#define for_each_mddev(_mddev,_tmp) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700379 \
NeilBrownf72ffdd2014-09-30 14:23:59 +1000380 for (({ spin_lock(&all_mddevs_lock); \
NeilBrownfd01b882011-10-11 16:47:53 +1100381 _tmp = all_mddevs.next; \
382 _mddev = NULL;}); \
383 ({ if (_tmp != &all_mddevs) \
384 mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385 spin_unlock(&all_mddevs_lock); \
NeilBrownfd01b882011-10-11 16:47:53 +1100386 if (_mddev) mddev_put(_mddev); \
387 _mddev = list_entry(_tmp, struct mddev, all_mddevs); \
388 _tmp != &all_mddevs;}); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 ({ spin_lock(&all_mddevs_lock); \
NeilBrownfd01b882011-10-11 16:47:53 +1100390 _tmp = _tmp->next;}) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391 )
392
NeilBrown409c57f2009-03-31 14:39:39 +1100393/* Rather than calling directly into the personality make_request function,
394 * IO requests come here first so that we can check if the device is
395 * being suspended pending a reconfiguration.
396 * We hold a refcount over the call to ->make_request. By the time that
397 * call has finished, the bio has been linked into some internal structure
398 * and so is visible to ->quiesce(), so we don't need the refcount any more.
399 */
NeilBrownb3143b92017-10-17 13:46:43 +1100400static bool is_suspended(struct mddev *mddev, struct bio *bio)
401{
402 if (mddev->suspended)
403 return true;
404 if (bio_data_dir(bio) != WRITE)
405 return false;
406 if (mddev->suspend_lo >= mddev->suspend_hi)
407 return false;
408 if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
409 return false;
410 if (bio_end_sector(bio) < mddev->suspend_lo)
411 return false;
412 return true;
413}
414
Shaohua Li393debc2017-09-21 10:23:35 -0700415void md_handle_request(struct mddev *mddev, struct bio *bio)
416{
417check_suspended:
418 rcu_read_lock();
NeilBrownb3143b92017-10-17 13:46:43 +1100419 if (is_suspended(mddev, bio)) {
Shaohua Li393debc2017-09-21 10:23:35 -0700420 DEFINE_WAIT(__wait);
421 for (;;) {
422 prepare_to_wait(&mddev->sb_wait, &__wait,
423 TASK_UNINTERRUPTIBLE);
NeilBrownb3143b92017-10-17 13:46:43 +1100424 if (!is_suspended(mddev, bio))
Shaohua Li393debc2017-09-21 10:23:35 -0700425 break;
426 rcu_read_unlock();
427 schedule();
428 rcu_read_lock();
429 }
430 finish_wait(&mddev->sb_wait, &__wait);
431 }
432 atomic_inc(&mddev->active_io);
433 rcu_read_unlock();
434
435 if (!mddev->pers->make_request(mddev, bio)) {
436 atomic_dec(&mddev->active_io);
437 wake_up(&mddev->sb_wait);
438 goto check_suspended;
439 }
440
441 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
442 wake_up(&mddev->sb_wait);
443}
444EXPORT_SYMBOL(md_handle_request);
445
Christoph Hellwig3e087732021-10-12 13:12:24 +0200446static void md_submit_bio(struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447{
NeilBrown49077322010-03-25 16:20:56 +1100448 const int rw = bio_data_dir(bio);
Christoph Hellwig309dca302021-01-24 11:02:34 +0100449 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
NeilBrown49077322010-03-25 16:20:56 +1100450
Colin Ian King9a5a8592020-07-02 12:35:02 +0100451 if (mddev == NULL || mddev->pers == NULL) {
452 bio_io_error(bio);
Christoph Hellwig3e087732021-10-12 13:12:24 +0200453 return;
Colin Ian King9a5a8592020-07-02 12:35:02 +0100454 }
NeilBrown409c57f2009-03-31 14:39:39 +1100455
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -0300456 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
457 bio_io_error(bio);
Christoph Hellwig3e087732021-10-12 13:12:24 +0200458 return;
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -0300459 }
460
Christoph Hellwigf695ca32020-07-01 10:59:39 +0200461 blk_queue_split(&bio);
Kent Overstreet54efd502015-04-23 22:37:18 -0700462
Sebastian Riemerbbfa57c2013-02-21 13:28:09 +1100463 if (mddev->ro == 1 && unlikely(rw == WRITE)) {
Christoph Hellwig4246a0b2015-07-20 15:29:37 +0200464 if (bio_sectors(bio) != 0)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200465 bio->bi_status = BLK_STS_IOERR;
Christoph Hellwig4246a0b2015-07-20 15:29:37 +0200466 bio_endio(bio);
Christoph Hellwig3e087732021-10-12 13:12:24 +0200467 return;
Sebastian Riemerbbfa57c2013-02-21 13:28:09 +1100468 }
NeilBrown49077322010-03-25 16:20:56 +1100469
Shaohua Li9c573de2016-04-25 16:52:38 -0700470 /* bio could be mergeable after passing to underlayer */
Jens Axboe1eff9d32016-08-05 15:35:16 -0600471 bio->bi_opf &= ~REQ_NOMERGE;
Shaohua Li393debc2017-09-21 10:23:35 -0700472
473 md_handle_request(mddev, bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700474}
475
NeilBrown9e35b992010-04-06 14:23:02 +1000476/* mddev_suspend makes sure no new requests are submitted
477 * to the device, and that any requests that have been submitted
478 * are completely handled.
NeilBrownafa0f552014-12-15 12:56:58 +1100479 * Once mddev_detach() is called and completes, the module will be
480 * completely unused.
NeilBrown9e35b992010-04-06 14:23:02 +1000481 */
NeilBrownfd01b882011-10-11 16:47:53 +1100482void mddev_suspend(struct mddev *mddev)
NeilBrown409c57f2009-03-31 14:39:39 +1100483{
Heinz Mauelshagen092398d2016-05-03 19:43:57 +0200484 WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
NeilBrown4d5324f2017-10-19 12:17:16 +1100485 lockdep_assert_held(&mddev->reconfig_mutex);
Mikulas Patocka0dc10e52015-12-18 15:19:16 +1100486 if (mddev->suspended++)
487 return;
NeilBrown409c57f2009-03-31 14:39:39 +1100488 synchronize_rcu();
NeilBrowncc27b0c2017-06-05 16:49:39 +1000489 wake_up(&mddev->sb_wait);
NeilBrown35bfc522017-10-17 13:46:43 +1100490 set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
491 smp_mb__after_atomic();
NeilBrown409c57f2009-03-31 14:39:39 +1100492 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
493 mddev->pers->quiesce(mddev, 1);
NeilBrown35bfc522017-10-17 13:46:43 +1100494 clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
495 wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
Jonathan Brassow0d9f4f12012-05-16 04:06:14 -0500496
497 del_timer_sync(&mddev->safemode_timer);
Coly Li78f57ef2020-04-09 22:17:20 +0800498 /* restrict memory reclaim I/O during raid array is suspend */
499 mddev->noio_flag = memalloc_noio_save();
NeilBrown409c57f2009-03-31 14:39:39 +1100500}
NeilBrown390ee602010-06-01 19:37:27 +1000501EXPORT_SYMBOL_GPL(mddev_suspend);
NeilBrown409c57f2009-03-31 14:39:39 +1100502
NeilBrownfd01b882011-10-11 16:47:53 +1100503void mddev_resume(struct mddev *mddev)
NeilBrown409c57f2009-03-31 14:39:39 +1100504{
Coly Li78f57ef2020-04-09 22:17:20 +0800505 /* entred the memalloc scope from mddev_suspend() */
506 memalloc_noio_restore(mddev->noio_flag);
NeilBrown4d5324f2017-10-19 12:17:16 +1100507 lockdep_assert_held(&mddev->reconfig_mutex);
Mikulas Patocka0dc10e52015-12-18 15:19:16 +1100508 if (--mddev->suspended)
509 return;
NeilBrown409c57f2009-03-31 14:39:39 +1100510 wake_up(&mddev->sb_wait);
511 mddev->pers->quiesce(mddev, 0);
Jonathan Brassow0fd018a2011-06-07 17:49:36 -0500512
Jonathan Brassow47525e52012-05-22 13:55:29 +1000513 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Jonathan Brassow0fd018a2011-06-07 17:49:36 -0500514 md_wakeup_thread(mddev->thread);
515 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
NeilBrown409c57f2009-03-31 14:39:39 +1100516}
NeilBrown390ee602010-06-01 19:37:27 +1000517EXPORT_SYMBOL_GPL(mddev_resume);
NeilBrown409c57f2009-03-31 14:39:39 +1100518
NeilBrowna2826aa2009-12-14 12:49:49 +1100519/*
Tejun Heoe9c74692010-09-03 11:56:18 +0200520 * Generic flush handling for md
NeilBrowna2826aa2009-12-14 12:49:49 +1100521 */
NeilBrown4bc034d2019-03-29 10:46:16 -0700522
523static void md_end_flush(struct bio *bio)
NeilBrowna2826aa2009-12-14 12:49:49 +1100524{
NeilBrown4bc034d2019-03-29 10:46:16 -0700525 struct md_rdev *rdev = bio->bi_private;
526 struct mddev *mddev = rdev->mddev;
NeilBrowna2826aa2009-12-14 12:49:49 +1100527
528 rdev_dec_pending(rdev, mddev);
529
NeilBrown4bc034d2019-03-29 10:46:16 -0700530 if (atomic_dec_and_test(&mddev->flush_pending)) {
531 /* The pre-request flush has finished */
532 queue_work(md_wq, &mddev->flush_work);
NeilBrowna2826aa2009-12-14 12:49:49 +1100533 }
NeilBrown4bc034d2019-03-29 10:46:16 -0700534 bio_put(bio);
NeilBrowna2826aa2009-12-14 12:49:49 +1100535}
536
NeilBrown4bc034d2019-03-29 10:46:16 -0700537static void md_submit_flush_data(struct work_struct *ws);
538
539static void submit_flushes(struct work_struct *ws)
NeilBrowna2826aa2009-12-14 12:49:49 +1100540{
NeilBrown4bc034d2019-03-29 10:46:16 -0700541 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
NeilBrown3cb03002011-10-11 16:45:26 +1100542 struct md_rdev *rdev;
NeilBrowna2826aa2009-12-14 12:49:49 +1100543
NeilBrown2bc13b82019-03-29 10:46:17 -0700544 mddev->start_flush = ktime_get_boottime();
NeilBrown4bc034d2019-03-29 10:46:16 -0700545 INIT_WORK(&mddev->flush_work, md_submit_flush_data);
546 atomic_set(&mddev->flush_pending, 1);
NeilBrowna2826aa2009-12-14 12:49:49 +1100547 rcu_read_lock();
NeilBrowndafb20f2012-03-19 12:46:39 +1100548 rdev_for_each_rcu(rdev, mddev)
NeilBrowna2826aa2009-12-14 12:49:49 +1100549 if (rdev->raid_disk >= 0 &&
550 !test_bit(Faulty, &rdev->flags)) {
551 /* Take two references, one is dropped
552 * when request finishes, one after
553 * we reclaim rcu_read_lock
554 */
555 struct bio *bi;
556 atomic_inc(&rdev->nr_pending);
557 atomic_inc(&rdev->nr_pending);
558 rcu_read_unlock();
Christoph Hellwiga78f18d2021-01-26 15:52:41 +0100559 bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
Xiao Ni5a409b42018-05-21 11:49:54 +0800560 bi->bi_end_io = md_end_flush;
NeilBrown4bc034d2019-03-29 10:46:16 -0700561 bi->bi_private = rdev;
562 bio_set_dev(bi, rdev->bdev);
Christoph Hellwig70fd7612016-11-01 07:40:10 -0600563 bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
NeilBrown4bc034d2019-03-29 10:46:16 -0700564 atomic_inc(&mddev->flush_pending);
Mike Christie4e49ea42016-06-05 14:31:41 -0500565 submit_bio(bi);
NeilBrowna2826aa2009-12-14 12:49:49 +1100566 rcu_read_lock();
567 rdev_dec_pending(rdev, mddev);
568 }
569 rcu_read_unlock();
NeilBrown4bc034d2019-03-29 10:46:16 -0700570 if (atomic_dec_and_test(&mddev->flush_pending))
571 queue_work(md_wq, &mddev->flush_work);
572}
NeilBrowna2826aa2009-12-14 12:49:49 +1100573
NeilBrown4bc034d2019-03-29 10:46:16 -0700574static void md_submit_flush_data(struct work_struct *ws)
575{
576 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
577 struct bio *bio = mddev->flush_bio;
578
579 /*
580 * must reset flush_bio before calling into md_handle_request to avoid a
581 * deadlock, because other bios passed md_handle_request suspend check
582 * could wait for this and below md_handle_request could wait for those
583 * bios because of suspend check
584 */
Xiao Nidc5d17a32020-12-10 14:33:32 +0800585 spin_lock_irq(&mddev->lock);
Pankaj Gupta81ba3c22020-11-11 06:16:56 +0100586 mddev->prev_flush_start = mddev->start_flush;
NeilBrown4bc034d2019-03-29 10:46:16 -0700587 mddev->flush_bio = NULL;
Xiao Nidc5d17a32020-12-10 14:33:32 +0800588 spin_unlock_irq(&mddev->lock);
NeilBrown4bc034d2019-03-29 10:46:16 -0700589 wake_up(&mddev->sb_wait);
590
591 if (bio->bi_iter.bi_size == 0) {
592 /* an empty barrier - all done */
593 bio_endio(bio);
594 } else {
595 bio->bi_opf &= ~REQ_PREFLUSH;
596 md_handle_request(mddev, bio);
NeilBrowna2826aa2009-12-14 12:49:49 +1100597 }
NeilBrowna2826aa2009-12-14 12:49:49 +1100598}
NeilBrown4bc034d2019-03-29 10:46:16 -0700599
David Jeffery775d7832019-09-16 13:15:14 -0400600/*
601 * Manages consolidation of flushes and submitting any flushes needed for
602 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
603 * being finished in another context. Returns false if the flushing is
604 * complete but still needs the I/O portion of the bio to be processed.
605 */
606bool md_flush_request(struct mddev *mddev, struct bio *bio)
NeilBrown4bc034d2019-03-29 10:46:16 -0700607{
Pankaj Gupta81ba3c22020-11-11 06:16:56 +0100608 ktime_t req_start = ktime_get_boottime();
NeilBrown4bc034d2019-03-29 10:46:16 -0700609 spin_lock_irq(&mddev->lock);
Pankaj Gupta204d1a62020-11-11 06:16:57 +0100610 /* flush requests wait until ongoing flush completes,
611 * hence coalescing all the pending requests.
612 */
NeilBrown4bc034d2019-03-29 10:46:16 -0700613 wait_event_lock_irq(mddev->sb_wait,
NeilBrown2bc13b82019-03-29 10:46:17 -0700614 !mddev->flush_bio ||
Pankaj Guptaa23f2aa2020-11-11 06:16:58 +0100615 ktime_before(req_start, mddev->prev_flush_start),
NeilBrown4bc034d2019-03-29 10:46:16 -0700616 mddev->lock);
Pankaj Gupta204d1a62020-11-11 06:16:57 +0100617 /* new request after previous flush is completed */
Pankaj Guptaa23f2aa2020-11-11 06:16:58 +0100618 if (ktime_after(req_start, mddev->prev_flush_start)) {
NeilBrown2bc13b82019-03-29 10:46:17 -0700619 WARN_ON(mddev->flush_bio);
620 mddev->flush_bio = bio;
621 bio = NULL;
622 }
NeilBrown4bc034d2019-03-29 10:46:16 -0700623 spin_unlock_irq(&mddev->lock);
624
NeilBrown2bc13b82019-03-29 10:46:17 -0700625 if (!bio) {
626 INIT_WORK(&mddev->flush_work, submit_flushes);
627 queue_work(md_wq, &mddev->flush_work);
628 } else {
629 /* flush was performed for some other bio while we waited. */
630 if (bio->bi_iter.bi_size == 0)
631 /* an empty barrier - all done */
632 bio_endio(bio);
633 else {
634 bio->bi_opf &= ~REQ_PREFLUSH;
David Jeffery775d7832019-09-16 13:15:14 -0400635 return false;
NeilBrown2bc13b82019-03-29 10:46:17 -0700636 }
637 }
David Jeffery775d7832019-09-16 13:15:14 -0400638 return true;
NeilBrown4bc034d2019-03-29 10:46:16 -0700639}
Tejun Heoe9c74692010-09-03 11:56:18 +0200640EXPORT_SYMBOL(md_flush_request);
NeilBrown409c57f2009-03-31 14:39:39 +1100641
NeilBrownfd01b882011-10-11 16:47:53 +1100642static inline struct mddev *mddev_get(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643{
644 atomic_inc(&mddev->active);
645 return mddev;
646}
647
Dan Williams5fd3a172009-03-04 00:57:25 -0700648static void mddev_delayed_delete(struct work_struct *ws);
NeilBrownd3374822009-01-09 08:31:10 +1100649
NeilBrownfd01b882011-10-11 16:47:53 +1100650static void mddev_put(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651{
652 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
653 return;
NeilBrownd3374822009-01-09 08:31:10 +1100654 if (!mddev->raid_disks && list_empty(&mddev->disks) &&
NeilBrowncbd19982009-12-30 12:08:49 +1100655 mddev->ctime == 0 && !mddev->hold_active) {
656 /* Array is not configured at all, and not held active,
657 * so destroy it */
NeilBrownaf8a2432011-12-08 15:49:46 +1100658 list_del_init(&mddev->all_mddevs);
Kent Overstreet28dec872018-06-07 20:52:54 -0400659
660 /*
661 * Call queue_work inside the spinlock so that
662 * flush_workqueue() after mddev_find will succeed in waiting
663 * for the work to be done.
664 */
665 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
666 queue_work(md_misc_wq, &mddev->del_work);
NeilBrownd3374822009-01-09 08:31:10 +1100667 }
668 spin_unlock(&all_mddevs_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669}
670
Kees Cook8376d3c2017-10-16 17:01:48 -0700671static void md_safemode_timeout(struct timer_list *t);
Sasha Levin25b2edf2015-07-24 18:19:58 -0400672
NeilBrownfd01b882011-10-11 16:47:53 +1100673void mddev_init(struct mddev *mddev)
NeilBrownfafd7fb2010-04-01 15:55:30 +1100674{
Kent Overstreet28dec872018-06-07 20:52:54 -0400675 kobject_init(&mddev->kobj, &md_ktype);
NeilBrownfafd7fb2010-04-01 15:55:30 +1100676 mutex_init(&mddev->open_mutex);
677 mutex_init(&mddev->reconfig_mutex);
678 mutex_init(&mddev->bitmap_info.mutex);
679 INIT_LIST_HEAD(&mddev->disks);
680 INIT_LIST_HEAD(&mddev->all_mddevs);
Kees Cook8376d3c2017-10-16 17:01:48 -0700681 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
NeilBrownfafd7fb2010-04-01 15:55:30 +1100682 atomic_set(&mddev->active, 1);
683 atomic_set(&mddev->openers, 0);
684 atomic_set(&mddev->active_io, 0);
NeilBrown85572d72014-12-15 12:56:56 +1100685 spin_lock_init(&mddev->lock);
NeilBrown4bc034d2019-03-29 10:46:16 -0700686 atomic_set(&mddev->flush_pending, 0);
NeilBrownfafd7fb2010-04-01 15:55:30 +1100687 init_waitqueue_head(&mddev->sb_wait);
688 init_waitqueue_head(&mddev->recovery_wait);
689 mddev->reshape_position = MaxSector;
NeilBrown2c810cd2012-05-21 09:27:00 +1000690 mddev->reshape_backwards = 0;
Jonathan Brassowc4a39552013-06-25 01:23:59 -0500691 mddev->last_sync_action = "none";
NeilBrownfafd7fb2010-04-01 15:55:30 +1100692 mddev->resync_min = 0;
693 mddev->resync_max = MaxSector;
694 mddev->level = LEVEL_NONE;
695}
NeilBrown390ee602010-06-01 19:37:27 +1000696EXPORT_SYMBOL_GPL(mddev_init);
NeilBrownfafd7fb2010-04-01 15:55:30 +1100697
Christoph Hellwig8b57251f2021-04-03 18:15:28 +0200698static struct mddev *mddev_find_locked(dev_t unit)
699{
700 struct mddev *mddev;
701
702 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
703 if (mddev->unit == unit)
704 return mddev;
705
706 return NULL;
707}
708
Christoph Hellwig85c8c3c2021-04-12 10:05:28 +0200709/* find an unused unit number */
710static dev_t mddev_alloc_unit(void)
711{
712 static int next_minor = 512;
713 int start = next_minor;
714 bool is_free = 0;
715 dev_t dev = 0;
716
717 while (!is_free) {
718 dev = MKDEV(MD_MAJOR, next_minor);
719 next_minor++;
720 if (next_minor > MINORMASK)
721 next_minor = 0;
722 if (next_minor == start)
723 return 0; /* Oh dear, all in use. */
724 is_free = !mddev_find_locked(dev);
725 }
726
727 return dev;
728}
729
NeilBrownf72ffdd2014-09-30 14:23:59 +1000730static struct mddev *mddev_find(dev_t unit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731{
Christoph Hellwig65aa97c2021-04-03 18:15:29 +0200732 struct mddev *mddev;
733
734 if (MAJOR(unit) != MD_MAJOR)
735 unit &= ~((1 << MdpMinorShift) - 1);
736
737 spin_lock(&all_mddevs_lock);
738 mddev = mddev_find_locked(unit);
739 if (mddev)
740 mddev_get(mddev);
741 spin_unlock(&all_mddevs_lock);
742
743 return mddev;
744}
745
Christoph Hellwig0d809b32021-04-12 10:05:30 +0200746static struct mddev *mddev_alloc(dev_t unit)
Christoph Hellwig65aa97c2021-04-03 18:15:29 +0200747{
Christoph Hellwig0d809b32021-04-12 10:05:30 +0200748 struct mddev *new;
749 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750
NeilBrown8f5f02c2011-02-16 13:58:51 +1100751 if (unit && MAJOR(unit) != MD_MAJOR)
Christoph Hellwigd144fe62021-04-12 10:05:29 +0200752 unit &= ~((1 << MdpMinorShift) - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753
NeilBrown9ffae0c2006-01-06 00:20:32 -0800754 new = kzalloc(sizeof(*new), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 if (!new)
Christoph Hellwig0d809b32021-04-12 10:05:30 +0200756 return ERR_PTR(-ENOMEM);
NeilBrownfafd7fb2010-04-01 15:55:30 +1100757 mddev_init(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758
Christoph Hellwigd144fe62021-04-12 10:05:29 +0200759 spin_lock(&all_mddevs_lock);
760 if (unit) {
Christoph Hellwig0d809b32021-04-12 10:05:30 +0200761 error = -EEXIST;
762 if (mddev_find_locked(unit))
Christoph Hellwigd144fe62021-04-12 10:05:29 +0200763 goto out_free_new;
Christoph Hellwigd144fe62021-04-12 10:05:29 +0200764 new->unit = unit;
765 if (MAJOR(unit) == MD_MAJOR)
766 new->md_minor = MINOR(unit);
767 else
768 new->md_minor = MINOR(unit) >> MdpMinorShift;
769 new->hold_active = UNTIL_IOCTL;
770 } else {
Christoph Hellwig0d809b32021-04-12 10:05:30 +0200771 error = -ENODEV;
Christoph Hellwigd144fe62021-04-12 10:05:29 +0200772 new->unit = mddev_alloc_unit();
773 if (!new->unit)
774 goto out_free_new;
775 new->md_minor = MINOR(new->unit);
776 new->hold_active = UNTIL_STOP;
777 }
778
779 list_add(&new->all_mddevs, &all_mddevs);
780 spin_unlock(&all_mddevs_lock);
781 return new;
782out_free_new:
783 spin_unlock(&all_mddevs_lock);
784 kfree(new);
Christoph Hellwig0d809b32021-04-12 10:05:30 +0200785 return ERR_PTR(error);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786}
787
Rikard Falkebornc32dc042021-05-29 12:30:49 +0200788static const struct attribute_group md_redundancy_group;
NeilBrownb6eb1272010-04-15 10:13:47 +1000789
NeilBrown5c47daf2014-12-15 12:57:01 +1100790void mddev_unlock(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791{
NeilBrowna64c8762010-04-14 17:15:37 +1000792 if (mddev->to_remove) {
NeilBrownb6eb1272010-04-15 10:13:47 +1000793 /* These cannot be removed under reconfig_mutex as
794 * an access to the files will try to take reconfig_mutex
795 * while holding the file unremovable, which leads to
796 * a deadlock.
NeilBrownbb4f1e92010-08-08 21:18:03 +1000797 * So hold set sysfs_active while the remove in happeing,
798 * and anything else which might set ->to_remove or my
799 * otherwise change the sysfs namespace will fail with
800 * -EBUSY if sysfs_active is still set.
801 * We set sysfs_active under reconfig_mutex and elsewhere
802 * test it under the same mutex to ensure its correct value
803 * is seen.
NeilBrownb6eb1272010-04-15 10:13:47 +1000804 */
Rikard Falkebornc32dc042021-05-29 12:30:49 +0200805 const struct attribute_group *to_remove = mddev->to_remove;
NeilBrowna64c8762010-04-14 17:15:37 +1000806 mddev->to_remove = NULL;
NeilBrownbb4f1e92010-08-08 21:18:03 +1000807 mddev->sysfs_active = 1;
NeilBrownb6eb1272010-04-15 10:13:47 +1000808 mutex_unlock(&mddev->reconfig_mutex);
809
NeilBrown00bcb4a2010-06-01 19:37:23 +1000810 if (mddev->kobj.sd) {
811 if (to_remove != &md_redundancy_group)
812 sysfs_remove_group(&mddev->kobj, to_remove);
813 if (mddev->pers == NULL ||
814 mddev->pers->sync_request == NULL) {
815 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
816 if (mddev->sysfs_action)
817 sysfs_put(mddev->sysfs_action);
Junxiao Bie8efa9b2020-08-04 17:27:18 -0700818 if (mddev->sysfs_completed)
819 sysfs_put(mddev->sysfs_completed);
820 if (mddev->sysfs_degraded)
821 sysfs_put(mddev->sysfs_degraded);
NeilBrown00bcb4a2010-06-01 19:37:23 +1000822 mddev->sysfs_action = NULL;
Junxiao Bie8efa9b2020-08-04 17:27:18 -0700823 mddev->sysfs_completed = NULL;
824 mddev->sysfs_degraded = NULL;
NeilBrown00bcb4a2010-06-01 19:37:23 +1000825 }
NeilBrowna64c8762010-04-14 17:15:37 +1000826 }
NeilBrownbb4f1e92010-08-08 21:18:03 +1000827 mddev->sysfs_active = 0;
NeilBrownb6eb1272010-04-15 10:13:47 +1000828 } else
829 mutex_unlock(&mddev->reconfig_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830
Chris Dunlop751e67c2011-10-19 16:48:26 +1100831 /* As we've dropped the mutex we need a spinlock to
832 * make sure the thread doesn't disappear
NeilBrown01f96c02011-09-21 15:30:20 +1000833 */
834 spin_lock(&pers_lock);
NeilBrown005eca52005-08-22 13:11:08 -0700835 md_wakeup_thread(mddev->thread);
NeilBrown4d5324f2017-10-19 12:17:16 +1100836 wake_up(&mddev->sb_wait);
NeilBrown01f96c02011-09-21 15:30:20 +1000837 spin_unlock(&pers_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838}
NeilBrown5c47daf2014-12-15 12:57:01 +1100839EXPORT_SYMBOL_GPL(mddev_unlock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -0500841struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
NeilBrown1ca69c42012-10-11 13:37:33 +1100842{
843 struct md_rdev *rdev;
844
845 rdev_for_each_rcu(rdev, mddev)
846 if (rdev->desc_nr == nr)
847 return rdev;
848
849 return NULL;
850}
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -0500851EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
NeilBrown1ca69c42012-10-11 13:37:33 +1100852
853static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854{
NeilBrown3cb03002011-10-11 16:45:26 +1100855 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856
NeilBrowndafb20f2012-03-19 12:46:39 +1100857 rdev_for_each(rdev, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 if (rdev->bdev->bd_dev == dev)
859 return rdev;
Cheng Renquan159ec1f2009-01-09 08:31:08 +1100860
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861 return NULL;
862}
863
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +0100864struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
NeilBrown1ca69c42012-10-11 13:37:33 +1100865{
866 struct md_rdev *rdev;
867
868 rdev_for_each_rcu(rdev, mddev)
869 if (rdev->bdev->bd_dev == dev)
870 return rdev;
871
872 return NULL;
873}
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +0100874EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
NeilBrown1ca69c42012-10-11 13:37:33 +1100875
NeilBrown84fc4b52011-10-11 16:49:58 +1100876static struct md_personality *find_pers(int level, char *clevel)
NeilBrown2604b702006-01-06 00:20:36 -0800877{
NeilBrown84fc4b52011-10-11 16:49:58 +1100878 struct md_personality *pers;
NeilBrownd9d166c2006-01-06 00:20:51 -0800879 list_for_each_entry(pers, &pers_list, list) {
880 if (level != LEVEL_NONE && pers->level == level)
NeilBrown2604b702006-01-06 00:20:36 -0800881 return pers;
NeilBrownd9d166c2006-01-06 00:20:51 -0800882 if (strcmp(pers->name, clevel)==0)
883 return pers;
884 }
NeilBrown2604b702006-01-06 00:20:36 -0800885 return NULL;
886}
887
Andre Nollb73df2d2008-07-11 22:02:23 +1000888/* return the offset of the super block in 512byte sectors */
NeilBrown3cb03002011-10-11 16:45:26 +1100889static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890{
Jonathan Brassow57b2caa2011-01-14 09:14:33 +1100891 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
Andre Nollb73df2d2008-07-11 22:02:23 +1000892 return MD_NEW_SIZE_SECTORS(num_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893}
894
NeilBrownf72ffdd2014-09-30 14:23:59 +1000895static int alloc_disk_sb(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897 rdev->sb_page = alloc_page(GFP_KERNEL);
NeilBrown7f0f0d82016-11-02 14:16:49 +1100898 if (!rdev->sb_page)
Andre Nollebc24332008-07-11 22:02:20 +1000899 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 return 0;
901}
902
NeilBrown545c8792012-05-22 13:54:30 +1000903void md_rdev_clear(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904{
905 if (rdev->sb_page) {
NeilBrown2d1f3b52006-01-06 00:20:31 -0800906 put_page(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 rdev->sb_loaded = 0;
908 rdev->sb_page = NULL;
Andre Noll0f420352008-07-11 22:02:23 +1000909 rdev->sb_start = 0;
Andre Nolldd8ac332009-03-31 14:33:13 +1100910 rdev->sectors = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 }
NeilBrown2699b672011-07-28 11:31:47 +1000912 if (rdev->bb_page) {
913 put_page(rdev->bb_page);
914 rdev->bb_page = NULL;
915 }
Dan Williamsd3b407fb2016-01-06 12:19:22 -0800916 badblocks_exit(&rdev->badblocks);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917}
NeilBrown545c8792012-05-22 13:54:30 +1000918EXPORT_SYMBOL_GPL(md_rdev_clear);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919
Christoph Hellwig4246a0b2015-07-20 15:29:37 +0200920static void super_written(struct bio *bio)
NeilBrown7bfa19f2005-06-21 17:17:28 -0700921{
NeilBrown3cb03002011-10-11 16:45:26 +1100922 struct md_rdev *rdev = bio->bi_private;
NeilBrownfd01b882011-10-11 16:47:53 +1100923 struct mddev *mddev = rdev->mddev;
NeilBrown7bfa19f2005-06-21 17:17:28 -0700924
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200925 if (bio->bi_status) {
Guoqing Jiangb3db8a22020-07-28 12:01:41 +0200926 pr_err("md: %s gets error=%d\n", __func__,
927 blk_status_to_errno(bio->bi_status));
NeilBrowna9701a32005-11-08 21:39:34 -0800928 md_error(mddev, rdev);
NeilBrown46533ff2016-11-18 16:16:11 +1100929 if (!test_bit(Faulty, &rdev->flags)
930 && (bio->bi_opf & MD_FAILFAST)) {
Shaohua Li29530792016-12-08 15:48:19 -0800931 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
NeilBrown46533ff2016-11-18 16:16:11 +1100932 set_bit(LastDev, &rdev->flags);
933 }
934 } else
935 clear_bit(LastDev, &rdev->flags);
NeilBrown7bfa19f2005-06-21 17:17:28 -0700936
NeilBrowna9701a32005-11-08 21:39:34 -0800937 if (atomic_dec_and_test(&mddev->pending_writes))
938 wake_up(&mddev->sb_wait);
Shaohua Lied3b98c2016-03-29 14:00:19 -0700939 rdev_dec_pending(rdev, mddev);
Neil Brownf8b58ed2005-06-27 22:29:34 -0700940 bio_put(bio);
NeilBrown7bfa19f2005-06-21 17:17:28 -0700941}
942
NeilBrownfd01b882011-10-11 16:47:53 +1100943void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
NeilBrown7bfa19f2005-06-21 17:17:28 -0700944 sector_t sector, int size, struct page *page)
945{
946 /* write first size bytes of page to sector of rdev
947 * Increment mddev->pending_writes before returning
948 * and decrement it on completion, waking up sb_wait
949 * if zero is reached.
950 * If an error occurred, call md_error
951 */
NeilBrown46533ff2016-11-18 16:16:11 +1100952 struct bio *bio;
953 int ff = 0;
954
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +0100955 if (!page)
956 return;
957
NeilBrown46533ff2016-11-18 16:16:11 +1100958 if (test_bit(Faulty, &rdev->flags))
959 return;
960
Christoph Hellwig6a596562021-01-26 15:52:43 +0100961 bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
NeilBrown7bfa19f2005-06-21 17:17:28 -0700962
Shaohua Lied3b98c2016-03-29 14:00:19 -0700963 atomic_inc(&rdev->nr_pending);
964
Christoph Hellwig74d46992017-08-23 19:10:32 +0200965 bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
Kent Overstreet4f024f32013-10-11 15:44:27 -0700966 bio->bi_iter.bi_sector = sector;
NeilBrown7bfa19f2005-06-21 17:17:28 -0700967 bio_add_page(bio, page, size, 0);
968 bio->bi_private = rdev;
969 bio->bi_end_io = super_written;
NeilBrown46533ff2016-11-18 16:16:11 +1100970
971 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
972 test_bit(FailFast, &rdev->flags) &&
973 !test_bit(LastDev, &rdev->flags))
974 ff = MD_FAILFAST;
Jan Kara5a8948f2017-05-31 09:44:33 +0200975 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;
NeilBrowna9701a32005-11-08 21:39:34 -0800976
NeilBrown7bfa19f2005-06-21 17:17:28 -0700977 atomic_inc(&mddev->pending_writes);
Mike Christie4e49ea42016-06-05 14:31:41 -0500978 submit_bio(bio);
NeilBrowna9701a32005-11-08 21:39:34 -0800979}
980
NeilBrown46533ff2016-11-18 16:16:11 +1100981int md_super_wait(struct mddev *mddev)
NeilBrowna9701a32005-11-08 21:39:34 -0800982{
Tejun Heoe9c74692010-09-03 11:56:18 +0200983 /* wait for all superblock writes that were scheduled to complete */
NeilBrown1967cd52014-09-09 14:20:28 +1000984 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
Shaohua Li29530792016-12-08 15:48:19 -0800985 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
NeilBrown46533ff2016-11-18 16:16:11 +1100986 return -EAGAIN;
987 return 0;
NeilBrown7bfa19f2005-06-21 17:17:28 -0700988}
989
NeilBrown3cb03002011-10-11 16:45:26 +1100990int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
Mike Christie796a5cf2016-06-05 14:32:07 -0500991 struct page *page, int op, int op_flags, bool metadata_op)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992{
Christoph Hellwig32637382021-01-26 15:52:42 +0100993 struct bio bio;
994 struct bio_vec bvec;
995
996 bio_init(&bio, &bvec, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997
Christoph Hellwig74d46992017-08-23 19:10:32 +0200998 if (metadata_op && rdev->meta_bdev)
Christoph Hellwig32637382021-01-26 15:52:42 +0100999 bio_set_dev(&bio, rdev->meta_bdev);
Christoph Hellwig74d46992017-08-23 19:10:32 +02001000 else
Christoph Hellwig32637382021-01-26 15:52:42 +01001001 bio_set_dev(&bio, rdev->bdev);
1002 bio.bi_opf = op | op_flags;
Jonathan Brassowccebd4c2011-01-14 09:14:33 +11001003 if (metadata_op)
Christoph Hellwig32637382021-01-26 15:52:42 +01001004 bio.bi_iter.bi_sector = sector + rdev->sb_start;
NeilBrown1fdd6fc92012-05-21 09:28:32 +10001005 else if (rdev->mddev->reshape_position != MaxSector &&
1006 (rdev->mddev->reshape_backwards ==
1007 (sector >= rdev->mddev->reshape_position)))
Christoph Hellwig32637382021-01-26 15:52:42 +01001008 bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
Jonathan Brassowccebd4c2011-01-14 09:14:33 +11001009 else
Christoph Hellwig32637382021-01-26 15:52:42 +01001010 bio.bi_iter.bi_sector = sector + rdev->data_offset;
1011 bio_add_page(&bio, page, size, 0);
Mike Christie4e49ea42016-06-05 14:31:41 -05001012
Christoph Hellwig32637382021-01-26 15:52:42 +01001013 submit_bio_wait(&bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014
Christoph Hellwig32637382021-01-26 15:52:42 +01001015 return !bio.bi_status;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016}
NeilBrowna8745db2006-01-06 00:20:34 -08001017EXPORT_SYMBOL_GPL(sync_page_io);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001018
NeilBrownf72ffdd2014-09-30 14:23:59 +10001019static int read_disk_sb(struct md_rdev *rdev, int size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020{
1021 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10001022
Linus Torvalds1da177e2005-04-16 15:20:36 -07001023 if (rdev->sb_loaded)
1024 return 0;
1025
Mike Christie796a5cf2016-06-05 14:32:07 -05001026 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027 goto fail;
1028 rdev->sb_loaded = 1;
1029 return 0;
1030
1031fail:
NeilBrown9d487392016-11-02 14:16:49 +11001032 pr_err("md: disabled device %s, could not read superblock.\n",
1033 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 return -EINVAL;
1035}
1036
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001037static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038{
NeilBrownf72ffdd2014-09-30 14:23:59 +10001039 return sb1->set_uuid0 == sb2->set_uuid0 &&
Andre Noll05710462008-07-11 22:02:20 +10001040 sb1->set_uuid1 == sb2->set_uuid1 &&
1041 sb1->set_uuid2 == sb2->set_uuid2 &&
1042 sb1->set_uuid3 == sb2->set_uuid3;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043}
1044
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001045static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046{
1047 int ret;
1048 mdp_super_t *tmp1, *tmp2;
1049
1050 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
1051 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
1052
1053 if (!tmp1 || !tmp2) {
1054 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 goto abort;
1056 }
1057
1058 *tmp1 = *sb1;
1059 *tmp2 = *sb2;
1060
1061 /*
1062 * nr_disks is not constant
1063 */
1064 tmp1->nr_disks = 0;
1065 tmp2->nr_disks = 0;
1066
Andre Nollce0c8e02008-07-11 22:02:20 +10001067 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068abort:
Jesper Juhl990a8ba2005-06-21 17:17:30 -07001069 kfree(tmp1);
1070 kfree(tmp2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 return ret;
1072}
1073
NeilBrown4d167f02007-05-09 02:35:37 -07001074static u32 md_csum_fold(u32 csum)
1075{
1076 csum = (csum & 0xffff) + (csum >> 16);
1077 return (csum & 0xffff) + (csum >> 16);
1078}
1079
NeilBrownf72ffdd2014-09-30 14:23:59 +10001080static unsigned int calc_sb_csum(mdp_super_t *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081{
NeilBrown4d167f02007-05-09 02:35:37 -07001082 u64 newcsum = 0;
1083 u32 *sb32 = (u32*)sb;
1084 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 unsigned int disk_csum, csum;
1086
1087 disk_csum = sb->sb_csum;
1088 sb->sb_csum = 0;
NeilBrown4d167f02007-05-09 02:35:37 -07001089
1090 for (i = 0; i < MD_SB_BYTES/4 ; i++)
1091 newcsum += sb32[i];
1092 csum = (newcsum & 0xffffffff) + (newcsum>>32);
1093
NeilBrown4d167f02007-05-09 02:35:37 -07001094#ifdef CONFIG_ALPHA
1095 /* This used to use csum_partial, which was wrong for several
1096 * reasons including that different results are returned on
1097 * different architectures. It isn't critical that we get exactly
1098 * the same return value as before (we always csum_fold before
1099 * testing, and that removes any differences). However as we
1100 * know that csum_partial always returned a 16bit value on
1101 * alphas, do a fold to maximise conformity to previous behaviour.
1102 */
1103 sb->sb_csum = md_csum_fold(disk_csum);
1104#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 sb->sb_csum = disk_csum;
NeilBrown4d167f02007-05-09 02:35:37 -07001106#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 return csum;
1108}
1109
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110/*
1111 * Handle superblock details.
1112 * We want to be able to handle multiple superblock formats
1113 * so we have a common interface to them all, and an array of
1114 * different handlers.
1115 * We rely on user-space to write the initial superblock, and support
1116 * reading and updating of superblocks.
1117 * Interface methods are:
NeilBrown3cb03002011-10-11 16:45:26 +11001118 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 * loads and validates a superblock on dev.
1120 * if refdev != NULL, compare superblocks on both devices
1121 * Return:
1122 * 0 - dev has a superblock that is compatible with refdev
1123 * 1 - dev has a superblock that is compatible and newer than refdev
1124 * so dev should be used as the refdev in future
1125 * -EINVAL superblock incompatible or invalid
1126 * -othererror e.g. -EIO
1127 *
NeilBrownfd01b882011-10-11 16:47:53 +11001128 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 * Verify that dev is acceptable into mddev.
1130 * The first time, mddev->raid_disks will be 0, and data from
1131 * dev should be merged in. Subsequent calls check that dev
1132 * is new enough. Return 0 or -EINVAL
1133 *
NeilBrownfd01b882011-10-11 16:47:53 +11001134 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135 * Update the superblock for rdev with data in mddev
1136 * This does not write to disc.
1137 *
1138 */
1139
1140struct super_type {
Chris Webb0cd17fe2008-06-28 08:31:46 +10001141 char *name;
1142 struct module *owner;
NeilBrownc6563a82012-05-21 09:27:00 +10001143 int (*load_super)(struct md_rdev *rdev,
1144 struct md_rdev *refdev,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001145 int minor_version);
NeilBrownc6563a82012-05-21 09:27:00 +10001146 int (*validate_super)(struct mddev *mddev,
1147 struct md_rdev *rdev);
1148 void (*sync_super)(struct mddev *mddev,
1149 struct md_rdev *rdev);
NeilBrown3cb03002011-10-11 16:45:26 +11001150 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
Andre Noll15f4a5f2008-07-21 14:42:12 +10001151 sector_t num_sectors);
NeilBrownc6563a82012-05-21 09:27:00 +10001152 int (*allow_new_offset)(struct md_rdev *rdev,
1153 unsigned long long new_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154};
1155
1156/*
Andre Noll0894cc32009-06-18 08:49:23 +10001157 * Check that the given mddev has no bitmap.
1158 *
1159 * This function is called from the run method of all personalities that do not
1160 * support bitmaps. It prints an error message and returns non-zero if mddev
1161 * has a bitmap. Otherwise, it returns 0.
1162 *
1163 */
NeilBrownfd01b882011-10-11 16:47:53 +11001164int md_check_no_bitmap(struct mddev *mddev)
Andre Noll0894cc32009-06-18 08:49:23 +10001165{
NeilBrownc3d97142009-12-14 12:49:52 +11001166 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
Andre Noll0894cc32009-06-18 08:49:23 +10001167 return 0;
NeilBrown9d487392016-11-02 14:16:49 +11001168 pr_warn("%s: bitmaps are not supported for %s\n",
Andre Noll0894cc32009-06-18 08:49:23 +10001169 mdname(mddev), mddev->pers->name);
1170 return 1;
1171}
1172EXPORT_SYMBOL(md_check_no_bitmap);
1173
1174/*
NeilBrownf72ffdd2014-09-30 14:23:59 +10001175 * load_super for 0.90.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 */
NeilBrown3cb03002011-10-11 16:45:26 +11001177static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178{
1179 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1180 mdp_super_t *sb;
1181 int ret;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001182 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
1184 /*
Andre Noll0f420352008-07-11 22:02:23 +10001185 * Calculate the position of the superblock (512byte sectors),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 * it's at the end of the disk.
1187 *
1188 * It also happens to be a multiple of 4Kb.
1189 */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001190 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
NeilBrown0002b272005-09-09 16:23:53 -07001192 ret = read_disk_sb(rdev, MD_SB_BYTES);
NeilBrown9d487392016-11-02 14:16:49 +11001193 if (ret)
1194 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
1196 ret = -EINVAL;
1197
1198 bdevname(rdev->bdev, b);
Namhyung Kim65a06f062011-07-27 11:00:36 +10001199 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200
1201 if (sb->md_magic != MD_SB_MAGIC) {
NeilBrown9d487392016-11-02 14:16:49 +11001202 pr_warn("md: invalid raid superblock magic on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 goto abort;
1204 }
1205
1206 if (sb->major_version != 0 ||
NeilBrownf6705572006-03-27 01:18:11 -08001207 sb->minor_version < 90 ||
1208 sb->minor_version > 91) {
NeilBrown9d487392016-11-02 14:16:49 +11001209 pr_warn("Bad version number %d.%d on %s\n",
1210 sb->major_version, sb->minor_version, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 goto abort;
1212 }
1213
1214 if (sb->raid_disks <= 0)
1215 goto abort;
1216
NeilBrown4d167f02007-05-09 02:35:37 -07001217 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
NeilBrown9d487392016-11-02 14:16:49 +11001218 pr_warn("md: invalid superblock checksum on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 goto abort;
1220 }
1221
1222 rdev->preferred_minor = sb->md_minor;
1223 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10001224 rdev->new_data_offset = 0;
NeilBrown0002b272005-09-09 16:23:53 -07001225 rdev->sb_size = MD_SB_BYTES;
NeilBrown9f2f3832011-07-28 11:31:47 +10001226 rdev->badblocks.shift = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227
1228 if (sb->level == LEVEL_MULTIPATH)
1229 rdev->desc_nr = -1;
1230 else
1231 rdev->desc_nr = sb->this_disk.number;
1232
Yufen Yu228fc7d2019-10-30 18:47:02 +08001233 /* not spare disk, or LEVEL_MULTIPATH */
1234 if (sb->level == LEVEL_MULTIPATH ||
1235 (rdev->desc_nr >= 0 &&
Yufen Yu3b7436c2019-12-10 15:01:29 +08001236 rdev->desc_nr < MD_SB_DISKS &&
Yufen Yu228fc7d2019-10-30 18:47:02 +08001237 sb->disks[rdev->desc_nr].state &
1238 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1239 spare_disk = false;
1240
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001241 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001242 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001243 ret = 1;
1244 else
1245 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001246 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001248 mdp_super_t *refsb = page_address(refdev->sb_page);
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001249 if (!md_uuid_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001250 pr_warn("md: %s has different UUID to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 b, bdevname(refdev->bdev,b2));
1252 goto abort;
1253 }
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001254 if (!md_sb_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001255 pr_warn("md: %s has same UUID but different superblock to %s\n",
1256 b, bdevname(refdev->bdev, b2));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 goto abort;
1258 }
1259 ev1 = md_event(sb);
1260 ev2 = md_event(refsb);
Yufen Yu6a5cb532019-10-16 16:00:03 +08001261
Yufen Yu228fc7d2019-10-30 18:47:02 +08001262 if (!spare_disk && ev1 > ev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 ret = 1;
NeilBrownf72ffdd2014-09-30 14:23:59 +10001264 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 ret = 0;
1266 }
NeilBrown8190e752009-06-18 08:48:58 +10001267 rdev->sectors = rdev->sb_start;
NeilBrown667a5312012-08-16 16:46:12 +10001268 /* Limit to 4TB as metadata cannot record more than that.
1269 * (not needed for Linear and RAID0 as metadata doesn't
1270 * record this size)
1271 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001272 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001273 rdev->sectors = (sector_t)(2ULL << 32) - 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
NeilBrown27a7b262011-09-10 17:21:28 +10001275 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
NeilBrown2bf071b2006-01-06 00:20:55 -08001276 /* "this cannot possibly happen" ... */
1277 ret = -EINVAL;
1278
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 abort:
1280 return ret;
1281}
1282
1283/*
1284 * validate_super for 0.90.0
1285 */
NeilBrownfd01b882011-10-11 16:47:53 +11001286static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287{
1288 mdp_disk_t *desc;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001289 mdp_super_t *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001290 __u64 ev1 = md_event(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
NeilBrown41158c72005-06-21 17:17:25 -07001292 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001293 clear_bit(Faulty, &rdev->flags);
1294 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001295 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001296 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001297
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 if (mddev->raid_disks == 0) {
1299 mddev->major_version = 0;
1300 mddev->minor_version = sb->minor_version;
1301 mddev->patch_version = sb->patch_version;
NeilBrowne6910632008-02-06 01:39:51 -08001302 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001303 mddev->chunk_sectors = sb->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 mddev->ctime = sb->ctime;
1305 mddev->utime = sb->utime;
1306 mddev->level = sb->level;
NeilBrownd9d166c2006-01-06 00:20:51 -08001307 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 mddev->layout = sb->layout;
1309 mddev->raid_disks = sb->raid_disks;
NeilBrown27a7b262011-09-10 17:21:28 +10001310 mddev->dev_sectors = ((sector_t)sb->size) * 2;
NeilBrown07d84d102006-06-26 00:27:56 -07001311 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001312 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001313 mddev->bitmap_info.space = 0;
1314 /* bitmap can use 60 K after the 4K superblocks */
NeilBrownc3d97142009-12-14 12:49:52 +11001315 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001316 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrown2c810cd2012-05-21 09:27:00 +10001317 mddev->reshape_backwards = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
NeilBrownf6705572006-03-27 01:18:11 -08001319 if (mddev->minor_version >= 91) {
1320 mddev->reshape_position = sb->reshape_position;
1321 mddev->delta_disks = sb->delta_disks;
1322 mddev->new_level = sb->new_level;
1323 mddev->new_layout = sb->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001324 mddev->new_chunk_sectors = sb->new_chunk >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001325 if (mddev->delta_disks < 0)
1326 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001327 } else {
1328 mddev->reshape_position = MaxSector;
1329 mddev->delta_disks = 0;
1330 mddev->new_level = mddev->level;
1331 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001332 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001333 }
NeilBrown33f2c352019-09-09 16:52:29 +10001334 if (mddev->level == 0)
1335 mddev->layout = -1;
NeilBrownf6705572006-03-27 01:18:11 -08001336
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 if (sb->state & (1<<MD_SB_CLEAN))
1338 mddev->recovery_cp = MaxSector;
1339 else {
NeilBrownf72ffdd2014-09-30 14:23:59 +10001340 if (sb->events_hi == sb->cp_events_hi &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341 sb->events_lo == sb->cp_events_lo) {
1342 mddev->recovery_cp = sb->recovery_cp;
1343 } else
1344 mddev->recovery_cp = 0;
1345 }
1346
1347 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1348 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1349 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1350 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1351
1352 mddev->max_disks = MD_SB_DISKS;
NeilBrowna654b9d82005-06-21 17:17:27 -07001353
1354 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001355 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001356 mddev->bitmap_info.offset =
1357 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10001358 mddev->bitmap_info.space =
Dave Jonesc9ad0202013-08-19 22:26:32 -04001359 mddev->bitmap_info.default_space;
NeilBrown6409bb02012-05-22 13:55:07 +10001360 }
NeilBrowna654b9d82005-06-21 17:17:27 -07001361
NeilBrown41158c72005-06-21 17:17:25 -07001362 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001363 /* Insist on good event counter while assembling, except
1364 * for spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001366 if (sb->disks[rdev->desc_nr].state & (
1367 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
NeilBrownf72ffdd2014-09-30 14:23:59 +10001368 if (ev1 < mddev->events)
NeilBrownbe6800a2010-05-18 10:17:09 +10001369 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001370 } else if (mddev->bitmap) {
1371 /* if adding to array with a bitmap, then we can accept an
1372 * older device ... but not too old.
1373 */
NeilBrown41158c72005-06-21 17:17:25 -07001374 if (ev1 < mddev->bitmap->events_cleared)
1375 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001376 if (ev1 < mddev->events)
1377 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001378 } else {
1379 if (ev1 < mddev->events)
1380 /* just a hot-add of a new device, leave raid_disk at -1 */
1381 return 0;
1382 }
NeilBrown41158c72005-06-21 17:17:25 -07001383
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 if (mddev->level != LEVEL_MULTIPATH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385 desc = sb->disks + rdev->desc_nr;
1386
1387 if (desc->state & (1<<MD_DISK_FAULTY))
NeilBrownb2d444d2005-11-08 21:39:31 -08001388 set_bit(Faulty, &rdev->flags);
NeilBrown7c7546c2006-06-26 00:27:41 -07001389 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1390 desc->raid_disk < mddev->raid_disks */) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001391 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 rdev->raid_disk = desc->raid_disk;
NeilBrownf4667222013-12-09 12:04:56 +11001393 rdev->saved_raid_disk = desc->raid_disk;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001394 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1395 /* active but not in sync implies recovery up to
1396 * reshape position. We don't know exactly where
1397 * that is, so set to zero for now */
1398 if (mddev->minor_version >= 91) {
1399 rdev->recovery_offset = 0;
1400 rdev->raid_disk = desc->raid_disk;
1401 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001403 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1404 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001405 if (desc->state & (1<<MD_DISK_FAILFAST))
1406 set_bit(FailFast, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001407 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001408 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 return 0;
1410}
1411
1412/*
1413 * sync_super for 0.90.0
1414 */
NeilBrownfd01b882011-10-11 16:47:53 +11001415static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416{
1417 mdp_super_t *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001418 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 int next_spare = mddev->raid_disks;
NeilBrown19133a42005-11-08 21:39:35 -08001420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 /* make rdev->sb match mddev data..
1422 *
1423 * 1/ zero out disks
1424 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1425 * 3/ any empty disks < next_spare become removed
1426 *
1427 * disks[0] gets initialised to REMOVED because
1428 * we cannot be sure from other fields if it has
1429 * been initialised or not.
1430 */
1431 int i;
1432 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1433
NeilBrown61181562005-09-09 16:24:02 -07001434 rdev->sb_size = MD_SB_BYTES;
1435
Namhyung Kim65a06f062011-07-27 11:00:36 +10001436 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
1438 memset(sb, 0, sizeof(*sb));
1439
1440 sb->md_magic = MD_SB_MAGIC;
1441 sb->major_version = mddev->major_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 sb->patch_version = mddev->patch_version;
1443 sb->gvalid_words = 0; /* ignored */
1444 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1445 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1446 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1447 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1448
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001449 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 sb->level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11001451 sb->size = mddev->dev_sectors / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452 sb->raid_disks = mddev->raid_disks;
1453 sb->md_minor = mddev->md_minor;
NeilBrowne6910632008-02-06 01:39:51 -08001454 sb->not_persistent = 0;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001455 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 sb->state = 0;
1457 sb->events_hi = (mddev->events>>32);
1458 sb->events_lo = (u32)mddev->events;
1459
NeilBrownf6705572006-03-27 01:18:11 -08001460 if (mddev->reshape_position == MaxSector)
1461 sb->minor_version = 90;
1462 else {
1463 sb->minor_version = 91;
1464 sb->reshape_position = mddev->reshape_position;
1465 sb->new_level = mddev->new_level;
1466 sb->delta_disks = mddev->delta_disks;
1467 sb->new_layout = mddev->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001468 sb->new_chunk = mddev->new_chunk_sectors << 9;
NeilBrownf6705572006-03-27 01:18:11 -08001469 }
1470 mddev->minor_version = sb->minor_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 if (mddev->in_sync)
1472 {
1473 sb->recovery_cp = mddev->recovery_cp;
1474 sb->cp_events_hi = (mddev->events>>32);
1475 sb->cp_events_lo = (u32)mddev->events;
1476 if (mddev->recovery_cp == MaxSector)
1477 sb->state = (1<< MD_SB_CLEAN);
1478 } else
1479 sb->recovery_cp = 0;
1480
1481 sb->layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10001482 sb->chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
NeilBrownc3d97142009-12-14 12:49:52 +11001484 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
NeilBrowna654b9d82005-06-21 17:17:27 -07001485 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1486
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 sb->disks[0].state = (1<<MD_DISK_REMOVED);
NeilBrowndafb20f2012-03-19 12:46:39 +11001488 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 mdp_disk_t *d;
NeilBrown86e6ffd2005-11-08 21:39:24 -08001490 int desc_nr;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001491 int is_active = test_bit(In_sync, &rdev2->flags);
1492
1493 if (rdev2->raid_disk >= 0 &&
1494 sb->minor_version >= 91)
1495 /* we have nowhere to store the recovery_offset,
1496 * but if it is not below the reshape_position,
1497 * we can piggy-back on that.
1498 */
1499 is_active = 1;
1500 if (rdev2->raid_disk < 0 ||
1501 test_bit(Faulty, &rdev2->flags))
1502 is_active = 0;
1503 if (is_active)
NeilBrown86e6ffd2005-11-08 21:39:24 -08001504 desc_nr = rdev2->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 else
NeilBrown86e6ffd2005-11-08 21:39:24 -08001506 desc_nr = next_spare++;
NeilBrown19133a42005-11-08 21:39:35 -08001507 rdev2->desc_nr = desc_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 d = &sb->disks[rdev2->desc_nr];
1509 nr_disks++;
1510 d->number = rdev2->desc_nr;
1511 d->major = MAJOR(rdev2->bdev->bd_dev);
1512 d->minor = MINOR(rdev2->bdev->bd_dev);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001513 if (is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 d->raid_disk = rdev2->raid_disk;
1515 else
1516 d->raid_disk = rdev2->desc_nr; /* compatibility */
NeilBrown1be78922006-03-27 01:18:03 -08001517 if (test_bit(Faulty, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 d->state = (1<<MD_DISK_FAULTY);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001519 else if (is_active) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 d->state = (1<<MD_DISK_ACTIVE);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001521 if (test_bit(In_sync, &rdev2->flags))
1522 d->state |= (1<<MD_DISK_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 active++;
1524 working++;
1525 } else {
1526 d->state = 0;
1527 spare++;
1528 working++;
1529 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001530 if (test_bit(WriteMostly, &rdev2->flags))
1531 d->state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11001532 if (test_bit(FailFast, &rdev2->flags))
1533 d->state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 /* now set the "removed" and "faulty" bits on any missing devices */
1536 for (i=0 ; i < mddev->raid_disks ; i++) {
1537 mdp_disk_t *d = &sb->disks[i];
1538 if (d->state == 0 && d->number == 0) {
1539 d->number = i;
1540 d->raid_disk = i;
1541 d->state = (1<<MD_DISK_REMOVED);
1542 d->state |= (1<<MD_DISK_FAULTY);
1543 failed++;
1544 }
1545 }
1546 sb->nr_disks = nr_disks;
1547 sb->active_disks = active;
1548 sb->working_disks = working;
1549 sb->failed_disks = failed;
1550 sb->spare_disks = spare;
1551
1552 sb->this_disk = sb->disks[rdev->desc_nr];
1553 sb->sb_csum = calc_sb_csum(sb);
1554}
1555
1556/*
Chris Webb0cd17fe2008-06-28 08:31:46 +10001557 * rdev_size_change for 0.90.0
1558 */
1559static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11001560super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001561{
Andre Noll58c0fed2009-03-31 14:33:13 +11001562 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001563 return 0; /* component must fit device */
NeilBrownc3d97142009-12-14 12:49:52 +11001564 if (rdev->mddev->bitmap_info.offset)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001565 return 0; /* can't move bitmap */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001566 rdev->sb_start = calc_dev_sboffset(rdev);
Andre Noll15f4a5f2008-07-21 14:42:12 +10001567 if (!num_sectors || num_sectors > rdev->sb_start)
1568 num_sectors = rdev->sb_start;
NeilBrown27a7b262011-09-10 17:21:28 +10001569 /* Limit to 4TB as metadata cannot record more than that.
1570 * 4TB == 2^32 KB, or 2*2^32 sectors.
1571 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001572 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001573 num_sectors = (sector_t)(2ULL << 32) - 2;
NeilBrown46533ff2016-11-18 16:16:11 +11001574 do {
1575 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001576 rdev->sb_page);
NeilBrown46533ff2016-11-18 16:16:11 +11001577 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11001578 return num_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10001579}
1580
NeilBrownc6563a82012-05-21 09:27:00 +10001581static int
1582super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1583{
1584 /* non-zero offset changes not possible with v0.90 */
1585 return new_offset == 0;
1586}
Chris Webb0cd17fe2008-06-28 08:31:46 +10001587
1588/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 * version 1 superblock
1590 */
1591
NeilBrownf72ffdd2014-09-30 14:23:59 +10001592static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593{
NeilBrown1c05b4b2006-10-21 10:24:08 -07001594 __le32 disk_csum;
1595 u32 csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 unsigned long long newcsum;
1597 int size = 256 + le32_to_cpu(sb->max_dev)*2;
NeilBrown1c05b4b2006-10-21 10:24:08 -07001598 __le32 *isuper = (__le32*)sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599
1600 disk_csum = sb->sb_csum;
1601 sb->sb_csum = 0;
1602 newcsum = 0;
NeilBrown1f3c9902012-12-11 13:09:00 +11001603 for (; size >= 4; size -= 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 newcsum += le32_to_cpu(*isuper++);
1605
1606 if (size == 2)
NeilBrown1c05b4b2006-10-21 10:24:08 -07001607 newcsum += le16_to_cpu(*(__le16*) isuper);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608
1609 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1610 sb->sb_csum = disk_csum;
1611 return cpu_to_le32(csum);
1612}
1613
NeilBrown3cb03002011-10-11 16:45:26 +11001614static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615{
1616 struct mdp_superblock_1 *sb;
1617 int ret;
Andre Noll0f420352008-07-11 22:02:23 +10001618 sector_t sb_start;
NeilBrownc6563a82012-05-21 09:27:00 +10001619 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown0002b272005-09-09 16:23:53 -07001621 int bmask;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001622 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
1624 /*
Andre Noll0f420352008-07-11 22:02:23 +10001625 * Calculate the position of the superblock in 512byte sectors.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 * It is always aligned to a 4K boundary and
1627 * depeding on minor_version, it can be:
1628 * 0: At least 8K, but less than 12K, from end of device
1629 * 1: At start of device
1630 * 2: 4K from start of device.
1631 */
1632 switch(minor_version) {
1633 case 0:
Mike Snitzer77304d22010-11-08 14:39:12 +01001634 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll0f420352008-07-11 22:02:23 +10001635 sb_start -= 8*2;
1636 sb_start &= ~(sector_t)(4*2-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 break;
1638 case 1:
Andre Noll0f420352008-07-11 22:02:23 +10001639 sb_start = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 break;
1641 case 2:
Andre Noll0f420352008-07-11 22:02:23 +10001642 sb_start = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 break;
1644 default:
1645 return -EINVAL;
1646 }
Andre Noll0f420352008-07-11 22:02:23 +10001647 rdev->sb_start = sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
NeilBrown0002b272005-09-09 16:23:53 -07001649 /* superblock is rarely larger than 1K, but it can be larger,
1650 * and it is safe to read 4k, so we do that
1651 */
1652 ret = read_disk_sb(rdev, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 if (ret) return ret;
1654
Namhyung Kim65a06f062011-07-27 11:00:36 +10001655 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656
1657 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1658 sb->major_version != cpu_to_le32(1) ||
1659 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
Andre Noll0f420352008-07-11 22:02:23 +10001660 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
NeilBrown71c08052005-09-09 16:23:51 -07001661 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 return -EINVAL;
1663
1664 if (calc_sb_1_csum(sb) != sb->sb_csum) {
NeilBrown9d487392016-11-02 14:16:49 +11001665 pr_warn("md: invalid superblock checksum on %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 bdevname(rdev->bdev,b));
1667 return -EINVAL;
1668 }
1669 if (le64_to_cpu(sb->data_size) < 10) {
NeilBrown9d487392016-11-02 14:16:49 +11001670 pr_warn("md: data_size too small on %s\n",
1671 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 return -EINVAL;
1673 }
NeilBrownc6563a82012-05-21 09:27:00 +10001674 if (sb->pad0 ||
1675 sb->pad3[0] ||
1676 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1677 /* Some padding is non-zero, might be a new feature */
1678 return -EINVAL;
NeilBrowne11e93f2007-05-09 02:35:36 -07001679
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 rdev->preferred_minor = 0xffff;
1681 rdev->data_offset = le64_to_cpu(sb->data_offset);
NeilBrownc6563a82012-05-21 09:27:00 +10001682 rdev->new_data_offset = rdev->data_offset;
1683 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1684 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1685 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
NeilBrown4dbcdc72006-01-06 00:20:52 -08001686 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687
NeilBrown0002b272005-09-09 16:23:53 -07001688 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
Martin K. Petersene1defc42009-05-22 17:17:49 -04001689 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
NeilBrown0002b272005-09-09 16:23:53 -07001690 if (rdev->sb_size & bmask)
NeilBrowna1801f82008-03-04 14:29:31 -08001691 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1692
1693 if (minor_version
Andre Noll0f420352008-07-11 22:02:23 +10001694 && rdev->data_offset < sb_start + (rdev->sb_size/512))
NeilBrowna1801f82008-03-04 14:29:31 -08001695 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10001696 if (minor_version
1697 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1698 return -EINVAL;
NeilBrown0002b272005-09-09 16:23:53 -07001699
NeilBrown31b65a02006-07-10 04:44:14 -07001700 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1701 rdev->desc_nr = -1;
1702 else
1703 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1704
NeilBrown2699b672011-07-28 11:31:47 +10001705 if (!rdev->bb_page) {
1706 rdev->bb_page = alloc_page(GFP_KERNEL);
1707 if (!rdev->bb_page)
1708 return -ENOMEM;
1709 }
1710 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1711 rdev->badblocks.count == 0) {
1712 /* need to load the bad block list.
1713 * Currently we limit it to one page.
1714 */
1715 s32 offset;
1716 sector_t bb_sector;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001717 __le64 *bbp;
NeilBrown2699b672011-07-28 11:31:47 +10001718 int i;
1719 int sectors = le16_to_cpu(sb->bblog_size);
1720 if (sectors > (PAGE_SIZE / 512))
1721 return -EINVAL;
1722 offset = le32_to_cpu(sb->bblog_offset);
1723 if (offset == 0)
1724 return -EINVAL;
1725 bb_sector = (long long)offset;
1726 if (!sync_page_io(rdev, bb_sector, sectors << 9,
Mike Christie796a5cf2016-06-05 14:32:07 -05001727 rdev->bb_page, REQ_OP_READ, 0, true))
NeilBrown2699b672011-07-28 11:31:47 +10001728 return -EIO;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001729 bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10001730 rdev->badblocks.shift = sb->bblog_shift;
1731 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1732 u64 bb = le64_to_cpu(*bbp);
1733 int count = bb & (0x3ff);
1734 u64 sector = bb >> 10;
1735 sector <<= sb->bblog_shift;
1736 count <<= sb->bblog_shift;
1737 if (bb + 1 == 0)
1738 break;
Vishal Vermafc974ee2015-12-24 19:20:34 -07001739 if (badblocks_set(&rdev->badblocks, sector, count, 1))
NeilBrown2699b672011-07-28 11:31:47 +10001740 return -EINVAL;
1741 }
NeilBrown486adf72013-04-24 11:42:44 +10001742 } else if (sb->bblog_offset != 0)
1743 rdev->badblocks.shift = 0;
NeilBrown2699b672011-07-28 11:31:47 +10001744
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001745 if ((le32_to_cpu(sb->feature_map) &
1746 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001747 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1748 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1749 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1750 }
1751
NeilBrown33f2c352019-09-09 16:52:29 +10001752 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1753 sb->level != 0)
1754 return -EINVAL;
1755
Yufen Yu228fc7d2019-10-30 18:47:02 +08001756 /* not spare disk, or LEVEL_MULTIPATH */
1757 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1758 (rdev->desc_nr >= 0 &&
1759 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1760 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1761 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1762 spare_disk = false;
Yufen Yu6a5cb532019-10-16 16:00:03 +08001763
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001764 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001765 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001766 ret = 1;
1767 else
1768 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001769 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001771 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
1773 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1774 sb->level != refsb->level ||
1775 sb->layout != refsb->layout ||
1776 sb->chunksize != refsb->chunksize) {
NeilBrown9d487392016-11-02 14:16:49 +11001777 pr_warn("md: %s has strangely different superblock to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 bdevname(rdev->bdev,b),
1779 bdevname(refdev->bdev,b2));
1780 return -EINVAL;
1781 }
1782 ev1 = le64_to_cpu(sb->events);
1783 ev2 = le64_to_cpu(refsb->events);
1784
Yufen Yu228fc7d2019-10-30 18:47:02 +08001785 if (!spare_disk && ev1 > ev2)
NeilBrown8ed75462006-02-03 03:03:41 -08001786 ret = 1;
1787 else
1788 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 }
NeilBrownc6563a82012-05-21 09:27:00 +10001790 if (minor_version) {
1791 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1792 sectors -= rdev->data_offset;
1793 } else
1794 sectors = rdev->sb_start;
1795 if (sectors < le64_to_cpu(sb->data_size))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796 return -EINVAL;
Andre Nolldd8ac332009-03-31 14:33:13 +11001797 rdev->sectors = le64_to_cpu(sb->data_size);
NeilBrown8ed75462006-02-03 03:03:41 -08001798 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799}
1800
NeilBrownfd01b882011-10-11 16:47:53 +11001801static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802{
Namhyung Kim65a06f062011-07-27 11:00:36 +10001803 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001804 __u64 ev1 = le64_to_cpu(sb->events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
NeilBrown41158c72005-06-21 17:17:25 -07001806 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001807 clear_bit(Faulty, &rdev->flags);
1808 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001809 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001810 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001811
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 if (mddev->raid_disks == 0) {
1813 mddev->major_version = 1;
1814 mddev->patch_version = 0;
NeilBrowne6910632008-02-06 01:39:51 -08001815 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001816 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001817 mddev->ctime = le64_to_cpu(sb->ctime);
1818 mddev->utime = le64_to_cpu(sb->utime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 mddev->level = le32_to_cpu(sb->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08001820 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 mddev->layout = le32_to_cpu(sb->layout);
1822 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001823 mddev->dev_sectors = le64_to_cpu(sb->size);
NeilBrown07d84d102006-06-26 00:27:56 -07001824 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001825 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001826 mddev->bitmap_info.space = 0;
1827 /* Default location for bitmap is 1K after superblock
1828 * using 3K - total of 4K
1829 */
NeilBrownc3d97142009-12-14 12:49:52 +11001830 mddev->bitmap_info.default_offset = 1024 >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001831 mddev->bitmap_info.default_space = (4096-1024) >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001832 mddev->reshape_backwards = 0;
1833
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1835 memcpy(mddev->uuid, sb->set_uuid, 16);
1836
1837 mddev->max_disks = (4096-256)/2;
NeilBrowna654b9d82005-06-21 17:17:27 -07001838
NeilBrown71c08052005-09-09 16:23:51 -07001839 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001840 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001841 mddev->bitmap_info.offset =
1842 (__s32)le32_to_cpu(sb->bitmap_offset);
NeilBrown6409bb02012-05-22 13:55:07 +10001843 /* Metadata doesn't record how much space is available.
1844 * For 1.0, we assume we can use up to the superblock
1845 * if before, else to 4K beyond superblock.
1846 * For others, assume no change is possible.
1847 */
1848 if (mddev->minor_version > 0)
1849 mddev->bitmap_info.space = 0;
1850 else if (mddev->bitmap_info.offset > 0)
1851 mddev->bitmap_info.space =
1852 8 - mddev->bitmap_info.offset;
1853 else
1854 mddev->bitmap_info.space =
1855 -mddev->bitmap_info.offset;
1856 }
NeilBrowne11e93f2007-05-09 02:35:36 -07001857
NeilBrownf6705572006-03-27 01:18:11 -08001858 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1859 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1860 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1861 mddev->new_level = le32_to_cpu(sb->new_level);
1862 mddev->new_layout = le32_to_cpu(sb->new_layout);
Andre Noll664e7c42009-06-18 08:45:27 +10001863 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
NeilBrown2c810cd2012-05-21 09:27:00 +10001864 if (mddev->delta_disks < 0 ||
1865 (mddev->delta_disks == 0 &&
1866 (le32_to_cpu(sb->feature_map)
1867 & MD_FEATURE_RESHAPE_BACKWARDS)))
1868 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001869 } else {
1870 mddev->reshape_position = MaxSector;
1871 mddev->delta_disks = 0;
1872 mddev->new_level = mddev->level;
1873 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001874 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001875 }
1876
NeilBrown33f2c352019-09-09 16:52:29 +10001877 if (mddev->level == 0 &&
1878 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1879 mddev->layout = -1;
1880
Song Liu486b0f72016-08-19 15:34:01 -07001881 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
Shaohua Lia62ab492016-01-06 14:37:13 -08001882 set_bit(MD_HAS_JOURNAL, &mddev->flags);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001883
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001884 if (le32_to_cpu(sb->feature_map) &
1885 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001886 if (le32_to_cpu(sb->feature_map) &
1887 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1888 return -EINVAL;
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001889 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1890 (le32_to_cpu(sb->feature_map) &
1891 MD_FEATURE_MULTIPLE_PPLS))
1892 return -EINVAL;
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001893 set_bit(MD_HAS_PPL, &mddev->flags);
1894 }
NeilBrown41158c72005-06-21 17:17:25 -07001895 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001896 /* Insist of good event counter while assembling, except for
1897 * spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001899 if (rdev->desc_nr >= 0 &&
1900 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
Song Liua3dfbda2015-10-08 21:54:11 -07001901 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1902 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
NeilBrownbe6800a2010-05-18 10:17:09 +10001903 if (ev1 < mddev->events)
1904 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001905 } else if (mddev->bitmap) {
1906 /* If adding to array with a bitmap, then we can accept an
1907 * older device, but not too old.
1908 */
NeilBrown41158c72005-06-21 17:17:25 -07001909 if (ev1 < mddev->bitmap->events_cleared)
1910 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001911 if (ev1 < mddev->events)
1912 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001913 } else {
1914 if (ev1 < mddev->events)
1915 /* just a hot-add of a new device, leave raid_disk at -1 */
1916 return 0;
1917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 if (mddev->level != LEVEL_MULTIPATH) {
1919 int role;
NeilBrown3673f302009-08-03 10:59:56 +10001920 if (rdev->desc_nr < 0 ||
1921 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
Song Liuc4d4c912015-08-13 14:31:54 -07001922 role = MD_DISK_ROLE_SPARE;
NeilBrown3673f302009-08-03 10:59:56 +10001923 rdev->desc_nr = -1;
1924 } else
1925 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 switch(role) {
Song Liuc4d4c912015-08-13 14:31:54 -07001927 case MD_DISK_ROLE_SPARE: /* spare */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 break;
Song Liuc4d4c912015-08-13 14:31:54 -07001929 case MD_DISK_ROLE_FAULTY: /* faulty */
NeilBrownb2d444d2005-11-08 21:39:31 -08001930 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 break;
Song Liubac624f2015-08-13 14:31:55 -07001932 case MD_DISK_ROLE_JOURNAL: /* journal device */
1933 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1934 /* journal device without journal feature */
NeilBrown9d487392016-11-02 14:16:49 +11001935 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
Song Liubac624f2015-08-13 14:31:55 -07001936 return -EINVAL;
1937 }
1938 set_bit(Journal, &rdev->flags);
Shaohua Li3069aa82015-08-13 14:31:56 -07001939 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
Shaohua Li9b156032015-12-18 15:19:16 +11001940 rdev->raid_disk = 0;
Song Liubac624f2015-08-13 14:31:55 -07001941 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 default:
NeilBrownf4667222013-12-09 12:04:56 +11001943 rdev->saved_raid_disk = role;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001944 if ((le32_to_cpu(sb->feature_map) &
NeilBrownf4667222013-12-09 12:04:56 +11001945 MD_FEATURE_RECOVERY_OFFSET)) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001946 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11001947 if (!(le32_to_cpu(sb->feature_map) &
1948 MD_FEATURE_RECOVERY_BITMAP))
1949 rdev->saved_raid_disk = -1;
Guoqing Jiang062f5b2a2019-07-24 11:09:20 +02001950 } else {
1951 /*
1952 * If the array is FROZEN, then the device can't
1953 * be in_sync with rest of array.
1954 */
1955 if (!test_bit(MD_RECOVERY_FROZEN,
1956 &mddev->recovery))
1957 set_bit(In_sync, &rdev->flags);
1958 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 rdev->raid_disk = role;
1960 break;
1961 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001962 if (sb->devflags & WriteMostly1)
1963 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001964 if (sb->devflags & FailFast1)
1965 set_bit(FailFast, &rdev->flags);
NeilBrown2d78f8c2011-12-23 10:17:51 +11001966 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1967 set_bit(Replacement, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001968 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001969 set_bit(In_sync, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001970
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 return 0;
1972}
1973
NeilBrownfd01b882011-10-11 16:47:53 +11001974static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975{
1976 struct mdp_superblock_1 *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001977 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 int max_dev, i;
1979 /* make rdev->sb match mddev and rdev data. */
1980
Namhyung Kim65a06f062011-07-27 11:00:36 +10001981 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982
1983 sb->feature_map = 0;
1984 sb->pad0 = 0;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001985 sb->recovery_offset = cpu_to_le64(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 memset(sb->pad3, 0, sizeof(sb->pad3));
1987
1988 sb->utime = cpu_to_le64((__u64)mddev->utime);
1989 sb->events = cpu_to_le64(mddev->events);
1990 if (mddev->in_sync)
1991 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
Shaohua Libd18f642015-09-02 13:49:50 -07001992 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1993 sb->resync_offset = cpu_to_le64(MaxSector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 else
1995 sb->resync_offset = cpu_to_le64(0);
1996
NeilBrown1c05b4b2006-10-21 10:24:08 -07001997 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
NeilBrown4dbcdc72006-01-06 00:20:52 -08001998
NeilBrownf0ca3402006-02-02 14:28:04 -08001999 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11002000 sb->size = cpu_to_le64(mddev->dev_sectors);
Andre Noll9d8f0362009-06-18 08:45:01 +10002001 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
NeilBrown62e1e382009-05-26 09:40:59 +10002002 sb->level = cpu_to_le32(mddev->level);
2003 sb->layout = cpu_to_le32(mddev->layout);
NeilBrown688834e2016-11-18 16:16:11 +11002004 if (test_bit(FailFast, &rdev->flags))
2005 sb->devflags |= FailFast1;
2006 else
2007 sb->devflags &= ~FailFast1;
NeilBrownf0ca3402006-02-02 14:28:04 -08002008
NeilBrownaeb9b2112011-08-25 14:43:08 +10002009 if (test_bit(WriteMostly, &rdev->flags))
2010 sb->devflags |= WriteMostly1;
2011 else
2012 sb->devflags &= ~WriteMostly1;
NeilBrownc6563a82012-05-21 09:27:00 +10002013 sb->data_offset = cpu_to_le64(rdev->data_offset);
2014 sb->data_size = cpu_to_le64(rdev->sectors);
NeilBrownaeb9b2112011-08-25 14:43:08 +10002015
NeilBrownc3d97142009-12-14 12:49:52 +11002016 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2017 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
NeilBrown71c08052005-09-09 16:23:51 -07002018 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
NeilBrowna654b9d82005-06-21 17:17:27 -07002019 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002020
Shaohua Lif2076e72015-10-08 21:54:12 -07002021 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
NeilBrown97e4f422009-03-31 14:33:13 +11002022 !test_bit(In_sync, &rdev->flags)) {
NeilBrown93be75f2009-12-14 12:50:06 +11002023 sb->feature_map |=
2024 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2025 sb->recovery_offset =
2026 cpu_to_le64(rdev->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11002027 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2028 sb->feature_map |=
2029 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002030 }
Shaohua Li3069aa82015-08-13 14:31:56 -07002031 /* Note: recovery_offset and journal_tail share space */
2032 if (test_bit(Journal, &rdev->flags))
2033 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002034 if (test_bit(Replacement, &rdev->flags))
2035 sb->feature_map |=
2036 cpu_to_le32(MD_FEATURE_REPLACEMENT);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002037
NeilBrownf6705572006-03-27 01:18:11 -08002038 if (mddev->reshape_position != MaxSector) {
2039 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2040 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2041 sb->new_layout = cpu_to_le32(mddev->new_layout);
2042 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2043 sb->new_level = cpu_to_le32(mddev->new_level);
Andre Noll664e7c42009-06-18 08:45:27 +10002044 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
NeilBrown2c810cd2012-05-21 09:27:00 +10002045 if (mddev->delta_disks == 0 &&
2046 mddev->reshape_backwards)
2047 sb->feature_map
2048 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
NeilBrownc6563a82012-05-21 09:27:00 +10002049 if (rdev->new_data_offset != rdev->data_offset) {
2050 sb->feature_map
2051 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2052 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2053 - rdev->data_offset));
2054 }
NeilBrownf6705572006-03-27 01:18:11 -08002055 }
NeilBrowna654b9d82005-06-21 17:17:27 -07002056
Goldwyn Rodrigues3c462c82015-08-19 07:35:54 +10002057 if (mddev_is_clustered(mddev))
2058 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2059
NeilBrown2699b672011-07-28 11:31:47 +10002060 if (rdev->badblocks.count == 0)
2061 /* Nothing to do for bad blocks*/ ;
2062 else if (sb->bblog_offset == 0)
2063 /* Cannot record bad blocks on this device */
2064 md_error(mddev, rdev);
2065 else {
2066 struct badblocks *bb = &rdev->badblocks;
Christoph Hellwigae506402019-04-04 18:56:13 +02002067 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10002068 u64 *p = bb->page;
2069 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2070 if (bb->changed) {
2071 unsigned seq;
2072
2073retry:
2074 seq = read_seqbegin(&bb->lock);
2075
2076 memset(bbp, 0xff, PAGE_SIZE);
2077
2078 for (i = 0 ; i < bb->count ; i++) {
majianpeng35f9ac22012-11-08 08:56:27 +08002079 u64 internal_bb = p[i];
NeilBrown2699b672011-07-28 11:31:47 +10002080 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2081 | BB_LEN(internal_bb));
majianpeng35f9ac22012-11-08 08:56:27 +08002082 bbp[i] = cpu_to_le64(store_bb);
NeilBrown2699b672011-07-28 11:31:47 +10002083 }
NeilBrownd0962932012-03-19 12:46:41 +11002084 bb->changed = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002085 if (read_seqretry(&bb->lock, seq))
2086 goto retry;
2087
2088 bb->sector = (rdev->sb_start +
2089 (int)le32_to_cpu(sb->bblog_offset));
2090 bb->size = le16_to_cpu(sb->bblog_size);
NeilBrown2699b672011-07-28 11:31:47 +10002091 }
2092 }
2093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 max_dev = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11002095 rdev_for_each(rdev2, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 if (rdev2->desc_nr+1 > max_dev)
2097 max_dev = rdev2->desc_nr+1;
NeilBrowna778b732007-05-23 13:58:10 -07002098
NeilBrown70471da2009-08-03 10:59:57 +10002099 if (max_dev > le32_to_cpu(sb->max_dev)) {
2100 int bmask;
NeilBrowna778b732007-05-23 13:58:10 -07002101 sb->max_dev = cpu_to_le32(max_dev);
NeilBrown70471da2009-08-03 10:59:57 +10002102 rdev->sb_size = max_dev * 2 + 256;
2103 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2104 if (rdev->sb_size & bmask)
2105 rdev->sb_size = (rdev->sb_size | bmask) + 1;
NeilBrownddcf3522010-09-08 16:48:17 +10002106 } else
2107 max_dev = le32_to_cpu(sb->max_dev);
2108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 for (i=0; i<max_dev;i++)
Lidong Zhong8df72022017-06-12 10:45:55 +08002110 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
NeilBrownf72ffdd2014-09-30 14:23:59 +10002111
Song Liua97b7892015-10-08 21:54:09 -07002112 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2113 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002115 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
Pawel Baldysiakddc08822017-08-16 17:13:45 +02002116 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2117 sb->feature_map |=
2118 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2119 else
2120 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002121 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2122 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2123 }
2124
NeilBrowndafb20f2012-03-19 12:46:39 +11002125 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 i = rdev2->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08002127 if (test_bit(Faulty, &rdev2->flags))
Song Liuc4d4c912015-08-13 14:31:54 -07002128 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08002129 else if (test_bit(In_sync, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Song Liua97b7892015-10-08 21:54:09 -07002131 else if (test_bit(Journal, &rdev2->flags))
Song Liubac624f2015-08-13 14:31:55 -07002132 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
NeilBrown93be75f2009-12-14 12:50:06 +11002133 else if (rdev2->raid_disk >= 0)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002134 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 else
Song Liuc4d4c912015-08-13 14:31:54 -07002136 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 }
2138
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 sb->sb_csum = calc_sb_1_csum(sb);
2140}
2141
Xiao Nid9c0fa52020-06-30 15:55:36 +08002142static sector_t super_1_choose_bm_space(sector_t dev_size)
2143{
2144 sector_t bm_space;
2145
2146 /* if the device is bigger than 8Gig, save 64k for bitmap
2147 * usage, if bigger than 200Gig, save 128k
2148 */
2149 if (dev_size < 64*2)
2150 bm_space = 0;
2151 else if (dev_size - 64*2 >= 200*1024*1024*2)
2152 bm_space = 128*2;
2153 else if (dev_size - 4*2 > 8*1024*1024*2)
2154 bm_space = 64*2;
2155 else
2156 bm_space = 4*2;
2157 return bm_space;
2158}
2159
Chris Webb0cd17fe2008-06-28 08:31:46 +10002160static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11002161super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002162{
2163 struct mdp_superblock_1 *sb;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002164 sector_t max_sectors;
Andre Noll58c0fed2009-03-31 14:33:13 +11002165 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002166 return 0; /* component must fit device */
NeilBrownc6563a82012-05-21 09:27:00 +10002167 if (rdev->data_offset != rdev->new_data_offset)
2168 return 0; /* too confusing */
Andre Noll0f420352008-07-11 22:02:23 +10002169 if (rdev->sb_start < rdev->data_offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002170 /* minor versions 1 and 2; superblock before data */
Mike Snitzer77304d22010-11-08 14:39:12 +01002171 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002172 max_sectors -= rdev->data_offset;
2173 if (!num_sectors || num_sectors > max_sectors)
2174 num_sectors = max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11002175 } else if (rdev->mddev->bitmap_info.offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002176 /* minor version 0 with bitmap we can't move */
2177 return 0;
2178 } else {
2179 /* minor version 0; superblock after data */
Xiao Nid9c0fa52020-06-30 15:55:36 +08002180 sector_t sb_start, bm_space;
2181 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2182
2183 /* 8K is for superblock */
2184 sb_start = dev_size - 8*2;
Andre Noll0f420352008-07-11 22:02:23 +10002185 sb_start &= ~(sector_t)(4*2 - 1);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002186
2187 bm_space = super_1_choose_bm_space(dev_size);
2188
2189 /* Space that can be used to store date needs to decrease
2190 * superblock bitmap space and bad block space(4K)
2191 */
2192 max_sectors = sb_start - bm_space - 4*2;
2193
Andre Noll15f4a5f2008-07-21 14:42:12 +10002194 if (!num_sectors || num_sectors > max_sectors)
2195 num_sectors = max_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002196 }
Namhyung Kim65a06f062011-07-27 11:00:36 +10002197 sb = page_address(rdev->sb_page);
Andre Noll15f4a5f2008-07-21 14:42:12 +10002198 sb->data_size = cpu_to_le64(num_sectors);
Jason Yan3fb632e2017-03-10 11:27:23 +08002199 sb->super_offset = cpu_to_le64(rdev->sb_start);
Chris Webb0cd17fe2008-06-28 08:31:46 +10002200 sb->sb_csum = calc_sb_1_csum(sb);
NeilBrown46533ff2016-11-18 16:16:11 +11002201 do {
2202 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2203 rdev->sb_page);
2204 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11002205 return num_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10002206
2207}
2208
2209static int
2210super_1_allow_new_offset(struct md_rdev *rdev,
2211 unsigned long long new_offset)
2212{
2213 /* All necessary checks on new >= old have been done */
2214 struct bitmap *bitmap;
2215 if (new_offset >= rdev->data_offset)
2216 return 1;
2217
2218 /* with 1.0 metadata, there is no metadata to tread on
2219 * so we can always move back */
2220 if (rdev->mddev->minor_version == 0)
2221 return 1;
2222
2223 /* otherwise we must be sure not to step on
2224 * any metadata, so stay:
2225 * 36K beyond start of superblock
2226 * beyond end of badblocks
2227 * beyond write-intent bitmap
2228 */
2229 if (rdev->sb_start + (32+4)*2 > new_offset)
2230 return 0;
2231 bitmap = rdev->mddev->bitmap;
2232 if (bitmap && !rdev->mddev->bitmap_info.file &&
2233 rdev->sb_start + rdev->mddev->bitmap_info.offset +
NeilBrown1ec885c2012-05-22 13:55:10 +10002234 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
NeilBrownc6563a82012-05-21 09:27:00 +10002235 return 0;
2236 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2237 return 0;
2238
2239 return 1;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002240}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241
Adrian Bunk75c96f82005-05-05 16:16:09 -07002242static struct super_type super_types[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243 [0] = {
2244 .name = "0.90.0",
2245 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002246 .load_super = super_90_load,
2247 .validate_super = super_90_validate,
2248 .sync_super = super_90_sync,
2249 .rdev_size_change = super_90_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002250 .allow_new_offset = super_90_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 },
2252 [1] = {
2253 .name = "md-1",
2254 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002255 .load_super = super_1_load,
2256 .validate_super = super_1_validate,
2257 .sync_super = super_1_sync,
2258 .rdev_size_change = super_1_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002259 .allow_new_offset = super_1_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 },
2261};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262
NeilBrownfd01b882011-10-11 16:47:53 +11002263static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
Jonathan Brassow076f9682011-06-07 17:51:30 -05002264{
2265 if (mddev->sync_super) {
2266 mddev->sync_super(mddev, rdev);
2267 return;
2268 }
2269
2270 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2271
2272 super_types[mddev->major_version].sync_super(mddev, rdev);
2273}
2274
NeilBrownfd01b882011-10-11 16:47:53 +11002275static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276{
NeilBrown3cb03002011-10-11 16:45:26 +11002277 struct md_rdev *rdev, *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
NeilBrown4b809912008-07-21 17:05:25 +10002279 rcu_read_lock();
Song Liu0b020e82015-09-03 23:00:35 -07002280 rdev_for_each_rcu(rdev, mddev1) {
2281 if (test_bit(Faulty, &rdev->flags) ||
2282 test_bit(Journal, &rdev->flags) ||
2283 rdev->raid_disk == -1)
2284 continue;
2285 rdev_for_each_rcu(rdev2, mddev2) {
2286 if (test_bit(Faulty, &rdev2->flags) ||
2287 test_bit(Journal, &rdev2->flags) ||
2288 rdev2->raid_disk == -1)
2289 continue;
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02002290 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
NeilBrown4b809912008-07-21 17:05:25 +10002291 rcu_read_unlock();
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002292 return 1;
NeilBrown4b809912008-07-21 17:05:25 +10002293 }
Song Liu0b020e82015-09-03 23:00:35 -07002294 }
2295 }
NeilBrown4b809912008-07-21 17:05:25 +10002296 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 return 0;
2298}
2299
2300static LIST_HEAD(pending_raid_disks);
2301
Andre Nollac5e7112009-08-03 10:59:47 +10002302/*
2303 * Try to register data integrity profile for an mddev
2304 *
2305 * This is called when an array is started and after a disk has been kicked
2306 * from the array. It only succeeds if all working and active component devices
2307 * are integrity capable with matching profiles.
2308 */
NeilBrownfd01b882011-10-11 16:47:53 +11002309int md_integrity_register(struct mddev *mddev)
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002310{
NeilBrown3cb03002011-10-11 16:45:26 +11002311 struct md_rdev *rdev, *reference = NULL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002312
Andre Nollac5e7112009-08-03 10:59:47 +10002313 if (list_empty(&mddev->disks))
2314 return 0; /* nothing to do */
Jonathan Brassow629acb62011-06-08 15:10:08 +10002315 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2316 return 0; /* shouldn't register, or already is */
NeilBrowndafb20f2012-03-19 12:46:39 +11002317 rdev_for_each(rdev, mddev) {
Andre Nollac5e7112009-08-03 10:59:47 +10002318 /* skip spares and non-functional disks */
2319 if (test_bit(Faulty, &rdev->flags))
2320 continue;
2321 if (rdev->raid_disk < 0)
2322 continue;
Andre Nollac5e7112009-08-03 10:59:47 +10002323 if (!reference) {
2324 /* Use the first rdev as the reference */
2325 reference = rdev;
2326 continue;
2327 }
2328 /* does this rdev's profile match the reference profile? */
2329 if (blk_integrity_compare(reference->bdev->bd_disk,
2330 rdev->bdev->bd_disk) < 0)
2331 return -EINVAL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002332 }
Martin K. Petersen89078d52011-03-28 20:09:12 -04002333 if (!reference || !bdev_get_integrity(reference->bdev))
2334 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002335 /*
2336 * All component devices are integrity capable and have matching
2337 * profiles, register the common profile for the md device.
2338 */
Martin K. Petersen25520d52015-10-21 13:19:49 -04002339 blk_integrity_register(mddev->gendisk,
2340 bdev_get_integrity(reference->bdev));
2341
NeilBrown9d487392016-11-02 14:16:49 +11002342 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
Guoqing Jiang10764812021-05-25 17:46:17 +08002343 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
Guoqing Jiangdaee2022021-06-03 17:21:06 +08002344 (mddev->level != 1 && mddev->level != 10 &&
2345 bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) {
Guoqing Jiangde3ea662021-06-03 17:21:07 +08002346 /*
2347 * No need to handle the failure of bioset_integrity_create,
2348 * because the function is called by md_run() -> pers->run(),
2349 * md_run calls bioset_exit -> bioset_integrity_free in case
2350 * of failure case.
2351 */
NeilBrown9d487392016-11-02 14:16:49 +11002352 pr_err("md: failed to create integrity pool for %s\n",
Martin K. Petersena91a2782011-03-17 11:11:05 +01002353 mdname(mddev));
2354 return -EINVAL;
2355 }
Andre Nollac5e7112009-08-03 10:59:47 +10002356 return 0;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002357}
Andre Nollac5e7112009-08-03 10:59:47 +10002358EXPORT_SYMBOL(md_integrity_register);
2359
Dan Williams1501efa2016-01-13 16:00:07 -08002360/*
2361 * Attempt to add an rdev, but only if it is consistent with the current
2362 * integrity profile
2363 */
2364int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
Andre Nollac5e7112009-08-03 10:59:47 +10002365{
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002366 struct blk_integrity *bi_mddev;
Dan Williams1501efa2016-01-13 16:00:07 -08002367 char name[BDEVNAME_SIZE];
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002368
2369 if (!mddev->gendisk)
Dan Williams1501efa2016-01-13 16:00:07 -08002370 return 0;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002371
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002372 bi_mddev = blk_get_integrity(mddev->gendisk);
Andre Nollac5e7112009-08-03 10:59:47 +10002373
2374 if (!bi_mddev) /* nothing to do */
Dan Williams1501efa2016-01-13 16:00:07 -08002375 return 0;
2376
2377 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11002378 pr_err("%s: incompatible integrity profile for %s\n",
2379 mdname(mddev), bdevname(rdev->bdev, name));
Dan Williams1501efa2016-01-13 16:00:07 -08002380 return -ENXIO;
2381 }
2382
2383 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002384}
2385EXPORT_SYMBOL(md_integrity_add_rdev);
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002386
Christoph Hellwigd7a47832021-02-01 14:17:20 +01002387static bool rdev_read_only(struct md_rdev *rdev)
2388{
2389 return bdev_read_only(rdev->bdev) ||
2390 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2391}
2392
NeilBrownf72ffdd2014-09-30 14:23:59 +10002393static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394{
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002395 char b[BDEVNAME_SIZE];
NeilBrown5e55e2f2007-03-26 21:32:14 -08002396 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397
Dan Williams11e2ede2008-04-30 00:52:32 -07002398 /* prevent duplicates */
2399 if (find_rdev(mddev, rdev->bdev->bd_dev))
2400 return -EEXIST;
2401
Christoph Hellwigd7a47832021-02-01 14:17:20 +01002402 if (rdev_read_only(rdev) && mddev->pers)
NeilBrown97b20ef2017-04-13 08:53:48 +10002403 return -EROFS;
2404
Andre Nolldd8ac332009-03-31 14:33:13 +11002405 /* make sure rdev->sectors exceeds mddev->dev_sectors */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002406 if (!test_bit(Journal, &rdev->flags) &&
2407 rdev->sectors &&
2408 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
NeilBrowna778b732007-05-23 13:58:10 -07002409 if (mddev->pers) {
2410 /* Cannot change size, so fail
2411 * If mddev->level <= 0, then we don't care
2412 * about aligning sizes (e.g. linear)
2413 */
2414 if (mddev->level > 0)
2415 return -ENOSPC;
2416 } else
Andre Nolldd8ac332009-03-31 14:33:13 +11002417 mddev->dev_sectors = rdev->sectors;
NeilBrown2bf071b2006-01-06 00:20:55 -08002418 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
2420 /* Verify rdev->desc_nr is unique.
2421 * If it is -1, assign a free number, else
2422 * check number is not in use
2423 */
NeilBrown4878e9e2014-09-25 17:00:11 +10002424 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 if (rdev->desc_nr < 0) {
2426 int choice = 0;
NeilBrown4878e9e2014-09-25 17:00:11 +10002427 if (mddev->pers)
2428 choice = mddev->raid_disks;
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002429 while (md_find_rdev_nr_rcu(mddev, choice))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 choice++;
2431 rdev->desc_nr = choice;
2432 } else {
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002433 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
NeilBrown4878e9e2014-09-25 17:00:11 +10002434 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 return -EBUSY;
NeilBrown4878e9e2014-09-25 17:00:11 +10002436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 }
NeilBrown4878e9e2014-09-25 17:00:11 +10002438 rcu_read_unlock();
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002439 if (!test_bit(Journal, &rdev->flags) &&
2440 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11002441 pr_warn("md: %s: array is limited to %d devices\n",
2442 mdname(mddev), mddev->max_disks);
NeilBrownde01dfa2009-02-06 18:02:46 +11002443 return -EBUSY;
2444 }
NeilBrown19133a42005-11-08 21:39:35 -08002445 bdevname(rdev->bdev,b);
Rasmus Villemoes90a9bef2015-06-25 15:02:36 -07002446 strreplace(b, '/', '!');
Greg Kroah-Hartman649316b2007-12-17 23:05:35 -07002447
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 rdev->mddev = mddev;
NeilBrown9d487392016-11-02 14:16:49 +11002449 pr_debug("md: bind<%s>\n", b);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002450
Guoqing Jiang963c5552019-06-14 17:10:36 +08002451 if (mddev->raid_disks)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002452 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang963c5552019-06-14 17:10:36 +08002453
Greg Kroah-Hartmanb2d6db52007-12-17 23:05:35 -07002454 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
NeilBrown5e55e2f2007-03-26 21:32:14 -08002455 goto fail;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002456
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09002457 /* failure here is OK */
Christoph Hellwig8d652692020-11-17 08:18:55 +01002458 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
NeilBrown00bcb4a2010-06-01 19:37:23 +10002459 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07002460 rdev->sysfs_unack_badblocks =
2461 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2462 rdev->sysfs_badblocks =
2463 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
NeilBrown3c0ee632008-10-21 13:25:28 +11002464
NeilBrown4b809912008-07-21 17:05:25 +10002465 list_add_rcu(&rdev->same_set, &mddev->disks);
Tejun Heoe09b4572010-11-13 11:55:17 +01002466 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
NeilBrown4044ba52009-01-09 08:31:11 +11002467
2468 /* May as well allow recovery to be retried once */
NeilBrown53890422011-07-27 11:00:36 +10002469 mddev->recovery_disabled++;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002470
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471 return 0;
NeilBrown5e55e2f2007-03-26 21:32:14 -08002472
2473 fail:
NeilBrown9d487392016-11-02 14:16:49 +11002474 pr_warn("md: failed to register dev-%s for %s\n",
2475 b, mdname(mddev));
NeilBrown5e55e2f2007-03-26 21:32:14 -08002476 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477}
2478
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002479static void rdev_delayed_delete(struct work_struct *ws)
NeilBrown5792a282007-04-04 19:08:18 -07002480{
NeilBrown3cb03002011-10-11 16:45:26 +11002481 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
NeilBrown5792a282007-04-04 19:08:18 -07002482 kobject_del(&rdev->kobj);
NeilBrown177a99b2008-02-06 01:39:56 -08002483 kobject_put(&rdev->kobj);
NeilBrown5792a282007-04-04 19:08:18 -07002484}
2485
NeilBrownf72ffdd2014-09-30 14:23:59 +10002486static void unbind_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487{
2488 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002489
Tejun Heo49731ba2011-01-14 18:43:57 +01002490 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
NeilBrown4b809912008-07-21 17:05:25 +10002491 list_del_rcu(&rdev->same_set);
NeilBrown9d487392016-11-02 14:16:49 +11002492 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01002493 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 rdev->mddev = NULL;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002495 sysfs_remove_link(&rdev->kobj, "block");
NeilBrown3c0ee632008-10-21 13:25:28 +11002496 sysfs_put(rdev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07002497 sysfs_put(rdev->sysfs_unack_badblocks);
2498 sysfs_put(rdev->sysfs_badblocks);
NeilBrown3c0ee632008-10-21 13:25:28 +11002499 rdev->sysfs_state = NULL;
Junxiao Bie1a86db2020-07-14 16:10:26 -07002500 rdev->sysfs_unack_badblocks = NULL;
2501 rdev->sysfs_badblocks = NULL;
NeilBrown2230dfe2011-07-28 11:31:46 +10002502 rdev->badblocks.count = 0;
NeilBrown5792a282007-04-04 19:08:18 -07002503 /* We need to delay this, otherwise we can deadlock when
NeilBrown4b809912008-07-21 17:05:25 +10002504 * writing to 'remove' to "dev/state". We also need
2505 * to delay it due to rcu usage.
NeilBrown5792a282007-04-04 19:08:18 -07002506 */
NeilBrown4b809912008-07-21 17:05:25 +10002507 synchronize_rcu();
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002508 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
NeilBrown177a99b2008-02-06 01:39:56 -08002509 kobject_get(&rdev->kobj);
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002510 queue_work(md_rdev_misc_wq, &rdev->del_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511}
2512
2513/*
2514 * prevent the device from being mounted, repartitioned or
2515 * otherwise reused by a RAID array (or any other kernel
2516 * subsystem), by bd_claiming the device.
2517 */
NeilBrown3cb03002011-10-11 16:45:26 +11002518static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519{
2520 int err = 0;
2521 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522
Tejun Heod4d77622010-11-13 11:55:18 +01002523 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
NeilBrown3cb03002011-10-11 16:45:26 +11002524 shared ? (struct md_rdev *)lock_rdev : rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 if (IS_ERR(bdev)) {
Christoph Hellwigea3edd42020-03-24 08:25:11 +01002526 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2527 MAJOR(dev), MINOR(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 return PTR_ERR(bdev);
2529 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 rdev->bdev = bdev;
2531 return err;
2532}
2533
NeilBrown3cb03002011-10-11 16:45:26 +11002534static void unlock_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535{
2536 struct block_device *bdev = rdev->bdev;
2537 rdev->bdev = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01002538 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539}
2540
2541void md_autodetect_dev(dev_t dev);
2542
NeilBrownf72ffdd2014-09-30 14:23:59 +10002543static void export_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544{
2545 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002546
NeilBrown9d487392016-11-02 14:16:49 +11002547 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
NeilBrown545c8792012-05-22 13:54:30 +10002548 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549#ifndef MODULE
NeilBrownd0fae182008-03-04 14:29:31 -08002550 if (test_bit(AutoDetected, &rdev->flags))
2551 md_autodetect_dev(rdev->bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552#endif
2553 unlock_rdev(rdev);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002554 kobject_put(&rdev->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555}
2556
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002557void md_kick_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558{
2559 unbind_rdev_from_array(rdev);
2560 export_rdev(rdev);
2561}
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002562EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563
NeilBrownfd01b882011-10-11 16:47:53 +11002564static void export_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565{
NeilBrown0638bb02014-09-25 17:43:47 +10002566 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567
NeilBrown0638bb02014-09-25 17:43:47 +10002568 while (!list_empty(&mddev->disks)) {
2569 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2570 same_set);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002571 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 mddev->raid_disks = 0;
2574 mddev->major_version = 0;
2575}
2576
NeilBrown6497709b2017-03-15 14:05:14 +11002577static bool set_in_sync(struct mddev *mddev)
2578{
Shaohua Liefa4b772017-10-18 22:08:13 -07002579 lockdep_assert_held(&mddev->lock);
NeilBrown4ad23a972017-03-15 14:05:14 +11002580 if (!mddev->in_sync) {
2581 mddev->sync_checkers++;
2582 spin_unlock(&mddev->lock);
2583 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2584 spin_lock(&mddev->lock);
2585 if (!mddev->in_sync &&
2586 percpu_ref_is_zero(&mddev->writes_pending)) {
NeilBrown6497709b2017-03-15 14:05:14 +11002587 mddev->in_sync = 1;
NeilBrown4ad23a972017-03-15 14:05:14 +11002588 /*
2589 * Ensure ->in_sync is visible before we clear
2590 * ->sync_checkers.
2591 */
NeilBrown55cc39f2017-03-15 14:05:14 +11002592 smp_mb();
NeilBrown6497709b2017-03-15 14:05:14 +11002593 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2594 sysfs_notify_dirent_safe(mddev->sysfs_state);
2595 }
NeilBrown4ad23a972017-03-15 14:05:14 +11002596 if (--mddev->sync_checkers == 0)
2597 percpu_ref_switch_to_percpu(&mddev->writes_pending);
NeilBrown6497709b2017-03-15 14:05:14 +11002598 }
2599 if (mddev->safemode == 1)
2600 mddev->safemode = 0;
2601 return mddev->in_sync;
2602}
2603
NeilBrownf72ffdd2014-09-30 14:23:59 +10002604static void sync_sbs(struct mddev *mddev, int nospares)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605{
NeilBrown42543762006-06-26 00:27:57 -07002606 /* Update each superblock (in-memory image), but
2607 * if we are allowed to, skip spares which already
2608 * have the right event counter, or have one earlier
2609 * (which would mean they aren't being marked as dirty
2610 * with the rest of the array)
2611 */
NeilBrown3cb03002011-10-11 16:45:26 +11002612 struct md_rdev *rdev;
NeilBrowndafb20f2012-03-19 12:46:39 +11002613 rdev_for_each(rdev, mddev) {
NeilBrown42543762006-06-26 00:27:57 -07002614 if (rdev->sb_events == mddev->events ||
2615 (nospares &&
2616 rdev->raid_disk < 0 &&
NeilBrown42543762006-06-26 00:27:57 -07002617 rdev->sb_events+1 == mddev->events)) {
2618 /* Don't update this superblock */
2619 rdev->sb_loaded = 2;
2620 } else {
Jonathan Brassow076f9682011-06-07 17:51:30 -05002621 sync_super(mddev, rdev);
NeilBrown42543762006-06-26 00:27:57 -07002622 rdev->sb_loaded = 1;
2623 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 }
2625}
2626
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002627static bool does_sb_need_changing(struct mddev *mddev)
2628{
2629 struct md_rdev *rdev;
2630 struct mdp_superblock_1 *sb;
2631 int role;
2632
2633 /* Find a good rdev */
2634 rdev_for_each(rdev, mddev)
2635 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2636 break;
2637
2638 /* No good device found. */
2639 if (!rdev)
2640 return false;
2641
2642 sb = page_address(rdev->sb_page);
2643 /* Check if a device has become faulty or a spare become active */
2644 rdev_for_each(rdev, mddev) {
2645 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2646 /* Device activated? */
2647 if (role == 0xffff && rdev->raid_disk >=0 &&
2648 !test_bit(Faulty, &rdev->flags))
2649 return true;
2650 /* Device turned faulty? */
2651 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2652 return true;
2653 }
2654
2655 /* Check if any mddev parameters have changed */
2656 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2657 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
Jason Yan13459212017-03-10 11:49:12 +08002658 (mddev->layout != le32_to_cpu(sb->layout)) ||
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002659 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2660 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2661 return true;
2662
2663 return false;
2664}
2665
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002666void md_update_sb(struct mddev *mddev, int force_change)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667{
NeilBrown3cb03002011-10-11 16:45:26 +11002668 struct md_rdev *rdev;
NeilBrown06d91a52005-06-21 17:17:12 -07002669 int sync_req;
NeilBrown42543762006-06-26 00:27:57 -07002670 int nospares = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002671 int any_badblocks_changed = 0;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002672 int ret = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
NeilBrownd87f0642013-04-24 11:42:40 +10002674 if (mddev->ro) {
2675 if (force_change)
Shaohua Li29530792016-12-08 15:48:19 -08002676 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownd87f0642013-04-24 11:42:40 +10002677 return;
2678 }
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002679
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002680repeat:
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002681 if (mddev_is_clustered(mddev)) {
Shaohua Li29530792016-12-08 15:48:19 -08002682 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002683 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002684 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04002685 nospares = 1;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002686 ret = md_cluster_ops->metadata_update_start(mddev);
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002687 /* Has someone else has updated the sb */
2688 if (!does_sb_need_changing(mddev)) {
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002689 if (ret == 0)
2690 md_cluster_ops->metadata_update_cancel(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08002691 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2692 BIT(MD_SB_CHANGE_DEVS) |
2693 BIT(MD_SB_CHANGE_CLEAN));
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002694 return;
2695 }
2696 }
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002697
NeilBrowndb0505d2017-10-17 16:18:36 +11002698 /*
2699 * First make sure individual recovery_offsets are correct
2700 * curr_resync_completed can only be used during recovery.
2701 * During reshape/resync it might use array-addresses rather
2702 * that device addresses.
2703 */
NeilBrowndafb20f2012-03-19 12:46:39 +11002704 rdev_for_each(rdev, mddev) {
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002705 if (rdev->raid_disk >= 0 &&
2706 mddev->delta_disks >= 0 &&
NeilBrowndb0505d2017-10-17 16:18:36 +11002707 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2708 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2709 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002710 !test_bit(Journal, &rdev->flags) &&
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002711 !test_bit(In_sync, &rdev->flags) &&
2712 mddev->curr_resync_completed > rdev->recovery_offset)
2713 rdev->recovery_offset = mddev->curr_resync_completed;
2714
NeilBrownf72ffdd2014-09-30 14:23:59 +10002715 }
Dan Williamsbd52b742010-08-30 17:33:33 +10002716 if (!mddev->persistent) {
Shaohua Li29530792016-12-08 15:48:19 -08002717 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2718 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownde393cd2011-07-28 11:31:48 +10002719 if (!mddev->external) {
Shaohua Li29530792016-12-08 15:48:19 -08002720 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrowndafb20f2012-03-19 12:46:39 +11002721 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002722 if (rdev->badblocks.changed) {
NeilBrownd0962932012-03-19 12:46:41 +11002723 rdev->badblocks.changed = 0;
Vishal Vermafc974ee2015-12-24 19:20:34 -07002724 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002725 md_error(mddev, rdev);
2726 }
2727 clear_bit(Blocked, &rdev->flags);
2728 clear_bit(BlockedBadBlocks, &rdev->flags);
2729 wake_up(&rdev->blocked_wait);
2730 }
2731 }
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002732 wake_up(&mddev->sb_wait);
2733 return;
2734 }
2735
NeilBrown85572d72014-12-15 12:56:56 +11002736 spin_lock(&mddev->lock);
NeilBrown84692192006-08-27 01:23:49 -07002737
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11002738 mddev->utime = ktime_get_real_seconds();
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002739
Shaohua Li29530792016-12-08 15:48:19 -08002740 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002741 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002742 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002743 /* just a clean<-> dirty transition, possibly leave spares alone,
2744 * though if events isn't the right even/odd, we will have to do
2745 * spares after all
2746 */
2747 nospares = 1;
2748 if (force_change)
2749 nospares = 0;
2750 if (mddev->degraded)
NeilBrown84692192006-08-27 01:23:49 -07002751 /* If the array is degraded, then skipping spares is both
2752 * dangerous and fairly pointless.
2753 * Dangerous because a device that was removed from the array
2754 * might have a event_count that still looks up-to-date,
2755 * so it can be re-added without a resync.
2756 * Pointless because if there are any spares to skip,
2757 * then a recovery will happen and soon that array won't
2758 * be degraded any more and the spare can go back to sleep then.
2759 */
NeilBrown850b2b422006-10-03 01:15:46 -07002760 nospares = 0;
NeilBrown84692192006-08-27 01:23:49 -07002761
NeilBrown06d91a52005-06-21 17:17:12 -07002762 sync_req = mddev->in_sync;
NeilBrown42543762006-06-26 00:27:57 -07002763
2764 /* If this is just a dirty<->clean transition, and the array is clean
2765 * and 'events' is odd, we can roll back to the previous clean state */
NeilBrown850b2b422006-10-03 01:15:46 -07002766 if (nospares
NeilBrown42543762006-06-26 00:27:57 -07002767 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
NeilBrowna8707c02010-05-18 09:28:43 +10002768 && mddev->can_decrease_events
2769 && mddev->events != 1) {
NeilBrown42543762006-06-26 00:27:57 -07002770 mddev->events--;
NeilBrowna8707c02010-05-18 09:28:43 +10002771 mddev->can_decrease_events = 0;
2772 } else {
NeilBrown42543762006-06-26 00:27:57 -07002773 /* otherwise we have to go forward and ... */
2774 mddev->events ++;
NeilBrowna8707c02010-05-18 09:28:43 +10002775 mddev->can_decrease_events = nospares;
NeilBrown42543762006-06-26 00:27:57 -07002776 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777
NeilBrown403df472014-09-30 15:52:29 +10002778 /*
2779 * This 64-bit counter should never wrap.
2780 * Either we are in around ~1 trillion A.C., assuming
2781 * 1 reboot per second, or we have a bug...
2782 */
2783 WARN_ON(mddev->events == 0);
NeilBrown2699b672011-07-28 11:31:47 +10002784
NeilBrowndafb20f2012-03-19 12:46:39 +11002785 rdev_for_each(rdev, mddev) {
NeilBrown2699b672011-07-28 11:31:47 +10002786 if (rdev->badblocks.changed)
2787 any_badblocks_changed++;
NeilBrownde393cd2011-07-28 11:31:48 +10002788 if (test_bit(Faulty, &rdev->flags))
2789 set_bit(FaultRecorded, &rdev->flags);
2790 }
NeilBrown2699b672011-07-28 11:31:47 +10002791
NeilBrowne6910632008-02-06 01:39:51 -08002792 sync_sbs(mddev, nospares);
NeilBrown85572d72014-12-15 12:56:56 +11002793 spin_unlock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794
NeilBrown36a4e1f2011-10-07 14:23:17 +11002795 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2796 mdname(mddev), mddev->in_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
Shaohua Li504634f2016-11-18 09:44:08 -08002798 if (mddev->queue)
2799 blk_add_trace_msg(mddev->queue, "md md_update_sb");
NeilBrown46533ff2016-11-18 16:16:11 +11002800rewrite:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002801 md_bitmap_update_sb(mddev->bitmap);
NeilBrowndafb20f2012-03-19 12:46:39 +11002802 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 char b[BDEVNAME_SIZE];
NeilBrown36a4e1f2011-10-07 14:23:17 +11002804
NeilBrown42543762006-06-26 00:27:57 -07002805 if (rdev->sb_loaded != 1)
2806 continue; /* no noise on spare devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002807
NeilBrownf4667222013-12-09 12:04:56 +11002808 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown7bfa19f2005-06-21 17:17:28 -07002809 md_super_write(mddev,rdev,
Andre Noll0f420352008-07-11 22:02:23 +10002810 rdev->sb_start, rdev->sb_size,
NeilBrown7bfa19f2005-06-21 17:17:28 -07002811 rdev->sb_page);
NeilBrown36a4e1f2011-10-07 14:23:17 +11002812 pr_debug("md: (write) %s's sb offset: %llu\n",
2813 bdevname(rdev->bdev, b),
2814 (unsigned long long)rdev->sb_start);
NeilBrown42543762006-06-26 00:27:57 -07002815 rdev->sb_events = mddev->events;
NeilBrown2699b672011-07-28 11:31:47 +10002816 if (rdev->badblocks.size) {
2817 md_super_write(mddev, rdev,
2818 rdev->badblocks.sector,
2819 rdev->badblocks.size << 9,
2820 rdev->bb_page);
2821 rdev->badblocks.size = 0;
2822 }
NeilBrown7bfa19f2005-06-21 17:17:28 -07002823
NeilBrownf4667222013-12-09 12:04:56 +11002824 } else
NeilBrown36a4e1f2011-10-07 14:23:17 +11002825 pr_debug("md: %s (skipping faulty)\n",
2826 bdevname(rdev->bdev, b));
Andrei Warkentind70ed2e2011-10-18 12:16:48 +11002827
NeilBrown7bfa19f2005-06-21 17:17:28 -07002828 if (mddev->level == LEVEL_MULTIPATH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 /* only need to write one superblock... */
2830 break;
2831 }
NeilBrown46533ff2016-11-18 16:16:11 +11002832 if (md_super_wait(mddev) < 0)
2833 goto rewrite;
Shaohua Li29530792016-12-08 15:48:19 -08002834 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
NeilBrown7bfa19f2005-06-21 17:17:28 -07002835
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002836 if (mddev_is_clustered(mddev) && ret == 0)
2837 md_cluster_ops->metadata_update_finish(mddev);
2838
NeilBrown850b2b422006-10-03 01:15:46 -07002839 if (mddev->in_sync != sync_req ||
Shaohua Li29530792016-12-08 15:48:19 -08002840 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2841 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
NeilBrown06d91a52005-06-21 17:17:12 -07002842 /* have to write it out again */
NeilBrown06d91a52005-06-21 17:17:12 -07002843 goto repeat;
NeilBrown3d310eb2005-06-21 17:17:26 -07002844 wake_up(&mddev->sb_wait);
NeilBrownacb180b2009-04-14 16:28:34 +10002845 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
Junxiao Bie1a86db2020-07-14 16:10:26 -07002846 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown06d91a52005-06-21 17:17:12 -07002847
NeilBrowndafb20f2012-03-19 12:46:39 +11002848 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002849 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2850 clear_bit(Blocked, &rdev->flags);
2851
2852 if (any_badblocks_changed)
Vishal Vermafc974ee2015-12-24 19:20:34 -07002853 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002854 clear_bit(BlockedBadBlocks, &rdev->flags);
2855 wake_up(&rdev->blocked_wait);
2856 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857}
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002858EXPORT_SYMBOL(md_update_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002860static int add_bound_rdev(struct md_rdev *rdev)
2861{
2862 struct mddev *mddev = rdev->mddev;
2863 int err = 0;
Shaohua Li87d4d912016-01-06 14:37:14 -08002864 bool add_journal = test_bit(Journal, &rdev->flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002865
Shaohua Li87d4d912016-01-06 14:37:14 -08002866 if (!mddev->pers->hot_remove_disk || add_journal) {
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002867 /* If there is hot_add_disk but no hot_remove_disk
2868 * then added disks for geometry changes,
2869 * and should be added immediately.
2870 */
2871 super_types[mddev->major_version].
2872 validate_super(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002873 if (add_journal)
2874 mddev_suspend(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002875 err = mddev->pers->hot_add_disk(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002876 if (add_journal)
2877 mddev_resume(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002878 if (err) {
Guoqing Jiangdb767672016-06-02 23:32:05 -04002879 md_kick_rdev_from_array(rdev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002880 return err;
2881 }
2882 }
2883 sysfs_notify_dirent_safe(rdev->sysfs_state);
2884
Shaohua Li29530792016-12-08 15:48:19 -08002885 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002886 if (mddev->degraded)
2887 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2888 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2889 md_new_event(mddev);
2890 md_wakeup_thread(mddev->thread);
2891 return 0;
2892}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893
Andre Noll7f6ce762008-03-23 18:34:54 +01002894/* words written to sysfs files may, or may not, be \n terminated.
NeilBrownbce74da2006-01-06 00:20:41 -08002895 * We want to accept with case. For this we use cmd_match.
2896 */
2897static int cmd_match(const char *cmd, const char *str)
2898{
2899 /* See if cmd, written into a sysfs file, matches
2900 * str. They must either be the same, or cmd can
2901 * have a trailing newline
2902 */
2903 while (*cmd && *str && *cmd == *str) {
2904 cmd++;
2905 str++;
2906 }
2907 if (*cmd == '\n')
2908 cmd++;
2909 if (*str || *cmd)
2910 return 0;
2911 return 1;
2912}
2913
NeilBrown86e6ffd2005-11-08 21:39:24 -08002914struct rdev_sysfs_entry {
2915 struct attribute attr;
NeilBrown3cb03002011-10-11 16:45:26 +11002916 ssize_t (*show)(struct md_rdev *, char *);
2917 ssize_t (*store)(struct md_rdev *, const char *, size_t);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002918};
2919
2920static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002921state_show(struct md_rdev *rdev, char *page)
NeilBrown86e6ffd2005-11-08 21:39:24 -08002922{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002923 char *sep = ",";
NeilBrown20a49ff2008-02-06 01:39:57 -08002924 size_t len = 0;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002925 unsigned long flags = READ_ONCE(rdev->flags);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002926
NeilBrown758bfc82014-12-15 12:56:59 +11002927 if (test_bit(Faulty, &flags) ||
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02002928 (!test_bit(ExternalBbl, &flags) &&
2929 rdev->badblocks.unacked_exist))
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002930 len += sprintf(page+len, "faulty%s", sep);
2931 if (test_bit(In_sync, &flags))
2932 len += sprintf(page+len, "in_sync%s", sep);
2933 if (test_bit(Journal, &flags))
2934 len += sprintf(page+len, "journal%s", sep);
2935 if (test_bit(WriteMostly, &flags))
2936 len += sprintf(page+len, "write_mostly%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002937 if (test_bit(Blocked, &flags) ||
NeilBrown52c64152011-12-08 16:22:48 +11002938 (rdev->badblocks.unacked_exist
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002939 && !test_bit(Faulty, &flags)))
2940 len += sprintf(page+len, "blocked%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002941 if (!test_bit(Faulty, &flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002942 !test_bit(Journal, &flags) &&
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002943 !test_bit(In_sync, &flags))
2944 len += sprintf(page+len, "spare%s", sep);
2945 if (test_bit(WriteErrorSeen, &flags))
2946 len += sprintf(page+len, "write_error%s", sep);
2947 if (test_bit(WantReplacement, &flags))
2948 len += sprintf(page+len, "want_replacement%s", sep);
2949 if (test_bit(Replacement, &flags))
2950 len += sprintf(page+len, "replacement%s", sep);
2951 if (test_bit(ExternalBbl, &flags))
2952 len += sprintf(page+len, "external_bbl%s", sep);
NeilBrown688834e2016-11-18 16:16:11 +11002953 if (test_bit(FailFast, &flags))
2954 len += sprintf(page+len, "failfast%s", sep);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002955
2956 if (len)
2957 len -= strlen(sep);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002958
NeilBrown86e6ffd2005-11-08 21:39:24 -08002959 return len+sprintf(page+len, "\n");
2960}
2961
NeilBrown45dc2de2006-06-26 00:27:58 -07002962static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002963state_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown45dc2de2006-06-26 00:27:58 -07002964{
2965 /* can write
NeilBrownde393cd2011-07-28 11:31:48 +10002966 * faulty - simulates an error
NeilBrown45dc2de2006-06-26 00:27:58 -07002967 * remove - disconnects the device
NeilBrownf6556752006-06-26 00:28:01 -07002968 * writemostly - sets write_mostly
2969 * -writemostly - clears write_mostly
NeilBrownde393cd2011-07-28 11:31:48 +10002970 * blocked - sets the Blocked flags
2971 * -blocked - clears the Blocked and possibly simulates an error
NeilBrown6d56e272009-04-14 12:01:57 +10002972 * insync - sets Insync providing device isn't active
NeilBrownf4667222013-12-09 12:04:56 +11002973 * -insync - clear Insync for a device with a slot assigned,
2974 * so that it gets rebuilt based on bitmap
NeilBrownd7a9d442011-07-28 11:31:48 +10002975 * write_error - sets WriteErrorSeen
2976 * -write_error - clears WriteErrorSeen
NeilBrown688834e2016-11-18 16:16:11 +11002977 * {,-}failfast - set/clear FailFast
NeilBrown45dc2de2006-06-26 00:27:58 -07002978 */
2979 int err = -EINVAL;
2980 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2981 md_error(rdev->mddev, rdev);
NeilBrown5ef56c82011-08-25 14:42:51 +10002982 if (test_bit(Faulty, &rdev->flags))
2983 err = 0;
2984 else
2985 err = -EBUSY;
NeilBrown45dc2de2006-06-26 00:27:58 -07002986 } else if (cmd_match(buf, "remove")) {
Shaohua Li5d881782016-07-28 09:06:34 -07002987 if (rdev->mddev->pers) {
2988 clear_bit(Blocked, &rdev->flags);
2989 remove_and_add_spares(rdev->mddev, rdev);
2990 }
NeilBrown45dc2de2006-06-26 00:27:58 -07002991 if (rdev->raid_disk >= 0)
2992 err = -EBUSY;
2993 else {
NeilBrownfd01b882011-10-11 16:47:53 +11002994 struct mddev *mddev = rdev->mddev;
NeilBrown45dc2de2006-06-26 00:27:58 -07002995 err = 0;
Guoqing Jianga9720902015-10-12 17:21:27 +08002996 if (mddev_is_clustered(mddev))
2997 err = md_cluster_ops->remove_disk(mddev, rdev);
2998
2999 if (err == 0) {
3000 md_kick_rdev_from_array(rdev);
NeilBrown060b0682016-11-04 16:46:03 +11003001 if (mddev->pers) {
Shaohua Li29530792016-12-08 15:48:19 -08003002 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11003003 md_wakeup_thread(mddev->thread);
3004 }
Guoqing Jianga9720902015-10-12 17:21:27 +08003005 md_new_event(mddev);
3006 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003007 }
NeilBrownf6556752006-06-26 00:28:01 -07003008 } else if (cmd_match(buf, "writemostly")) {
3009 set_bit(WriteMostly, &rdev->flags);
Guoqing Jiang404659c2019-12-23 10:48:53 +01003010 mddev_create_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003011 err = 0;
3012 } else if (cmd_match(buf, "-writemostly")) {
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01003013 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003014 clear_bit(WriteMostly, &rdev->flags);
3015 err = 0;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003016 } else if (cmd_match(buf, "blocked")) {
3017 set_bit(Blocked, &rdev->flags);
3018 err = 0;
3019 } else if (cmd_match(buf, "-blocked")) {
NeilBrownde393cd2011-07-28 11:31:48 +10003020 if (!test_bit(Faulty, &rdev->flags) &&
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02003021 !test_bit(ExternalBbl, &rdev->flags) &&
NeilBrown7da64a02011-08-30 16:20:17 +10003022 rdev->badblocks.unacked_exist) {
NeilBrownde393cd2011-07-28 11:31:48 +10003023 /* metadata handler doesn't understand badblocks,
3024 * so we need to fail the device
3025 */
3026 md_error(rdev->mddev, rdev);
3027 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003028 clear_bit(Blocked, &rdev->flags);
NeilBrownde393cd2011-07-28 11:31:48 +10003029 clear_bit(BlockedBadBlocks, &rdev->flags);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003030 wake_up(&rdev->blocked_wait);
3031 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3032 md_wakeup_thread(rdev->mddev->thread);
3033
3034 err = 0;
NeilBrown6d56e272009-04-14 12:01:57 +10003035 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3036 set_bit(In_sync, &rdev->flags);
3037 err = 0;
NeilBrown688834e2016-11-18 16:16:11 +11003038 } else if (cmd_match(buf, "failfast")) {
3039 set_bit(FailFast, &rdev->flags);
3040 err = 0;
3041 } else if (cmd_match(buf, "-failfast")) {
3042 clear_bit(FailFast, &rdev->flags);
3043 err = 0;
Shaohua Lif2076e72015-10-08 21:54:12 -07003044 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3045 !test_bit(Journal, &rdev->flags)) {
NeilBrowne1960f82014-09-30 15:24:25 +10003046 if (rdev->mddev->pers == NULL) {
3047 clear_bit(In_sync, &rdev->flags);
3048 rdev->saved_raid_disk = rdev->raid_disk;
3049 rdev->raid_disk = -1;
3050 err = 0;
3051 }
NeilBrownd7a9d442011-07-28 11:31:48 +10003052 } else if (cmd_match(buf, "write_error")) {
3053 set_bit(WriteErrorSeen, &rdev->flags);
3054 err = 0;
3055 } else if (cmd_match(buf, "-write_error")) {
3056 clear_bit(WriteErrorSeen, &rdev->flags);
3057 err = 0;
NeilBrown2d78f8c2011-12-23 10:17:51 +11003058 } else if (cmd_match(buf, "want_replacement")) {
3059 /* Any non-spare device that is not a replacement can
3060 * become want_replacement at any time, but we then need to
3061 * check if recovery is needed.
3062 */
3063 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07003064 !test_bit(Journal, &rdev->flags) &&
NeilBrown2d78f8c2011-12-23 10:17:51 +11003065 !test_bit(Replacement, &rdev->flags))
3066 set_bit(WantReplacement, &rdev->flags);
3067 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3068 md_wakeup_thread(rdev->mddev->thread);
3069 err = 0;
3070 } else if (cmd_match(buf, "-want_replacement")) {
3071 /* Clearing 'want_replacement' is always allowed.
3072 * Once replacements starts it is too late though.
3073 */
3074 err = 0;
3075 clear_bit(WantReplacement, &rdev->flags);
3076 } else if (cmd_match(buf, "replacement")) {
3077 /* Can only set a device as a replacement when array has not
3078 * yet been started. Once running, replacement is automatic
3079 * from spares, or by assigning 'slot'.
3080 */
3081 if (rdev->mddev->pers)
3082 err = -EBUSY;
3083 else {
3084 set_bit(Replacement, &rdev->flags);
3085 err = 0;
3086 }
3087 } else if (cmd_match(buf, "-replacement")) {
3088 /* Similarly, can only clear Replacement before start */
3089 if (rdev->mddev->pers)
3090 err = -EBUSY;
3091 else {
3092 clear_bit(Replacement, &rdev->flags);
3093 err = 0;
3094 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003095 } else if (cmd_match(buf, "re-add")) {
Yufen Yuee37e622019-04-02 14:22:14 +08003096 if (!rdev->mddev->pers)
3097 err = -EINVAL;
3098 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3099 rdev->saved_raid_disk >= 0) {
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05003100 /* clear_bit is performed _after_ all the devices
3101 * have their local Faulty bit cleared. If any writes
3102 * happen in the meantime in the local node, they
3103 * will land in the local bitmap, which will be synced
3104 * by this node eventually
3105 */
3106 if (!mddev_is_clustered(rdev->mddev) ||
3107 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3108 clear_bit(Faulty, &rdev->flags);
3109 err = add_bound_rdev(rdev);
3110 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003111 } else
3112 err = -EBUSY;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02003113 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3114 set_bit(ExternalBbl, &rdev->flags);
3115 rdev->badblocks.shift = 0;
3116 err = 0;
3117 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3118 clear_bit(ExternalBbl, &rdev->flags);
3119 err = 0;
NeilBrown45dc2de2006-06-26 00:27:58 -07003120 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10003121 if (!err)
3122 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrown45dc2de2006-06-26 00:27:58 -07003123 return err ? err : len;
3124}
NeilBrown80ca3a42006-07-10 04:44:18 -07003125static struct rdev_sysfs_entry rdev_state =
NeilBrown750f1992014-09-30 08:53:05 +10003126__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003127
3128static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003129errors_show(struct md_rdev *rdev, char *page)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003130{
3131 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3132}
3133
3134static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003135errors_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003136{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003137 unsigned int n;
3138 int rv;
3139
3140 rv = kstrtouint(buf, 10, &n);
3141 if (rv < 0)
3142 return rv;
3143 atomic_set(&rdev->corrected_errors, n);
3144 return len;
NeilBrown4dbcdc72006-01-06 00:20:52 -08003145}
3146static struct rdev_sysfs_entry rdev_errors =
NeilBrown80ca3a42006-07-10 04:44:18 -07003147__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
NeilBrown4dbcdc72006-01-06 00:20:52 -08003148
NeilBrown014236d2006-01-06 00:20:55 -08003149static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003150slot_show(struct md_rdev *rdev, char *page)
NeilBrown014236d2006-01-06 00:20:55 -08003151{
Shaohua Lif2076e72015-10-08 21:54:12 -07003152 if (test_bit(Journal, &rdev->flags))
3153 return sprintf(page, "journal\n");
3154 else if (rdev->raid_disk < 0)
NeilBrown014236d2006-01-06 00:20:55 -08003155 return sprintf(page, "none\n");
3156 else
3157 return sprintf(page, "%d\n", rdev->raid_disk);
3158}
3159
3160static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003161slot_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown014236d2006-01-06 00:20:55 -08003162{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003163 int slot;
NeilBrownc303da62008-02-06 01:39:51 -08003164 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003165
Shaohua Lif2076e72015-10-08 21:54:12 -07003166 if (test_bit(Journal, &rdev->flags))
3167 return -EBUSY;
NeilBrown014236d2006-01-06 00:20:55 -08003168 if (strncmp(buf, "none", 4)==0)
3169 slot = -1;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003170 else {
3171 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3172 if (err < 0)
3173 return err;
3174 }
Neil Brown6c2fce22008-06-28 08:31:31 +10003175 if (rdev->mddev->pers && slot == -1) {
NeilBrownc303da62008-02-06 01:39:51 -08003176 /* Setting 'slot' on an active array requires also
3177 * updating the 'rd%d' link, and communicating
3178 * with the personality with ->hot_*_disk.
3179 * For now we only support removing
3180 * failed/spare devices. This normally happens automatically,
3181 * but not when the metadata is externally managed.
3182 */
NeilBrownc303da62008-02-06 01:39:51 -08003183 if (rdev->raid_disk == -1)
3184 return -EEXIST;
3185 /* personality does all needed checks */
Namhyung Kim01393f32011-06-09 11:42:54 +10003186 if (rdev->mddev->pers->hot_remove_disk == NULL)
NeilBrownc303da62008-02-06 01:39:51 -08003187 return -EINVAL;
NeilBrown746d3202013-04-24 11:42:41 +10003188 clear_bit(Blocked, &rdev->flags);
3189 remove_and_add_spares(rdev->mddev, rdev);
3190 if (rdev->raid_disk >= 0)
3191 return -EBUSY;
NeilBrownc303da62008-02-06 01:39:51 -08003192 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3193 md_wakeup_thread(rdev->mddev->thread);
Neil Brown6c2fce22008-06-28 08:31:31 +10003194 } else if (rdev->mddev->pers) {
Neil Brown6c2fce22008-06-28 08:31:31 +10003195 /* Activating a spare .. or possibly reactivating
NeilBrown6d56e272009-04-14 12:01:57 +10003196 * if we ever get bitmaps working here.
Neil Brown6c2fce22008-06-28 08:31:31 +10003197 */
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003198 int err;
Neil Brown6c2fce22008-06-28 08:31:31 +10003199
3200 if (rdev->raid_disk != -1)
3201 return -EBUSY;
3202
NeilBrownc6751b22011-02-02 11:57:13 +11003203 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3204 return -EBUSY;
3205
Neil Brown6c2fce22008-06-28 08:31:31 +10003206 if (rdev->mddev->pers->hot_add_disk == NULL)
3207 return -EINVAL;
3208
NeilBrownba1b41b2011-01-14 09:14:34 +11003209 if (slot >= rdev->mddev->raid_disks &&
3210 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3211 return -ENOSPC;
3212
Neil Brown6c2fce22008-06-28 08:31:31 +10003213 rdev->raid_disk = slot;
3214 if (test_bit(In_sync, &rdev->flags))
3215 rdev->saved_raid_disk = slot;
3216 else
3217 rdev->saved_raid_disk = -1;
NeilBrownd30519f2011-10-18 12:13:47 +11003218 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11003219 clear_bit(Bitmap_sync, &rdev->flags);
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02003220 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003221 if (err) {
3222 rdev->raid_disk = -1;
3223 return err;
3224 } else
3225 sysfs_notify_dirent_safe(rdev->sysfs_state);
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09003226 /* failure here is OK */;
3227 sysfs_link_rdev(rdev->mddev, rdev);
Neil Brown6c2fce22008-06-28 08:31:31 +10003228 /* don't wakeup anyone, leave that to userspace. */
NeilBrownc303da62008-02-06 01:39:51 -08003229 } else {
NeilBrownba1b41b2011-01-14 09:14:34 +11003230 if (slot >= rdev->mddev->raid_disks &&
3231 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
NeilBrownc303da62008-02-06 01:39:51 -08003232 return -ENOSPC;
3233 rdev->raid_disk = slot;
3234 /* assume it is working */
NeilBrownc5d79ad2008-02-06 01:39:54 -08003235 clear_bit(Faulty, &rdev->flags);
3236 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc303da62008-02-06 01:39:51 -08003237 set_bit(In_sync, &rdev->flags);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003238 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrownc303da62008-02-06 01:39:51 -08003239 }
NeilBrown014236d2006-01-06 00:20:55 -08003240 return len;
3241}
3242
NeilBrown014236d2006-01-06 00:20:55 -08003243static struct rdev_sysfs_entry rdev_slot =
NeilBrown80ca3a42006-07-10 04:44:18 -07003244__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
NeilBrown014236d2006-01-06 00:20:55 -08003245
NeilBrown93c8cad2006-01-06 00:20:56 -08003246static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003247offset_show(struct md_rdev *rdev, char *page)
NeilBrown93c8cad2006-01-06 00:20:56 -08003248{
Andrew Morton6961ece2006-01-06 00:20:59 -08003249 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
NeilBrown93c8cad2006-01-06 00:20:56 -08003250}
3251
3252static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003253offset_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown93c8cad2006-01-06 00:20:56 -08003254{
NeilBrownc6563a82012-05-21 09:27:00 +10003255 unsigned long long offset;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003256 if (kstrtoull(buf, 10, &offset) < 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003257 return -EINVAL;
Neil Brown8ed0a522008-06-28 08:31:29 +10003258 if (rdev->mddev->pers && rdev->raid_disk >= 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003259 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003260 if (rdev->sectors && rdev->mddev->external)
NeilBrownc5d79ad2008-02-06 01:39:54 -08003261 /* Must set offset before size, so overlap checks
3262 * can be sane */
3263 return -EBUSY;
NeilBrown93c8cad2006-01-06 00:20:56 -08003264 rdev->data_offset = offset;
NeilBrown25f7fd42012-07-19 15:59:18 +10003265 rdev->new_data_offset = offset;
NeilBrown93c8cad2006-01-06 00:20:56 -08003266 return len;
3267}
3268
3269static struct rdev_sysfs_entry rdev_offset =
NeilBrown80ca3a42006-07-10 04:44:18 -07003270__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
NeilBrown93c8cad2006-01-06 00:20:56 -08003271
NeilBrownc6563a82012-05-21 09:27:00 +10003272static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3273{
3274 return sprintf(page, "%llu\n",
3275 (unsigned long long)rdev->new_data_offset);
3276}
3277
3278static ssize_t new_offset_store(struct md_rdev *rdev,
3279 const char *buf, size_t len)
3280{
3281 unsigned long long new_offset;
3282 struct mddev *mddev = rdev->mddev;
3283
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003284 if (kstrtoull(buf, 10, &new_offset) < 0)
NeilBrownc6563a82012-05-21 09:27:00 +10003285 return -EINVAL;
3286
NeilBrownf851b602014-12-11 10:02:10 +11003287 if (mddev->sync_thread ||
3288 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
NeilBrownc6563a82012-05-21 09:27:00 +10003289 return -EBUSY;
3290 if (new_offset == rdev->data_offset)
3291 /* reset is always permitted */
3292 ;
3293 else if (new_offset > rdev->data_offset) {
3294 /* must not push array size beyond rdev_sectors */
3295 if (new_offset - rdev->data_offset
3296 + mddev->dev_sectors > rdev->sectors)
3297 return -E2BIG;
3298 }
3299 /* Metadata worries about other space details. */
3300
3301 /* decreasing the offset is inconsistent with a backwards
3302 * reshape.
3303 */
3304 if (new_offset < rdev->data_offset &&
3305 mddev->reshape_backwards)
3306 return -EINVAL;
3307 /* Increasing offset is inconsistent with forwards
3308 * reshape. reshape_direction should be set to
3309 * 'backwards' first.
3310 */
3311 if (new_offset > rdev->data_offset &&
3312 !mddev->reshape_backwards)
3313 return -EINVAL;
3314
3315 if (mddev->pers && mddev->persistent &&
3316 !super_types[mddev->major_version]
3317 .allow_new_offset(rdev, new_offset))
3318 return -E2BIG;
3319 rdev->new_data_offset = new_offset;
3320 if (new_offset > rdev->data_offset)
3321 mddev->reshape_backwards = 1;
3322 else if (new_offset < rdev->data_offset)
3323 mddev->reshape_backwards = 0;
3324
3325 return len;
3326}
3327static struct rdev_sysfs_entry rdev_new_offset =
3328__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3329
NeilBrown83303b62006-01-06 00:21:06 -08003330static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003331rdev_size_show(struct md_rdev *rdev, char *page)
NeilBrown83303b62006-01-06 00:21:06 -08003332{
Andre Nolldd8ac332009-03-31 14:33:13 +11003333 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
NeilBrown83303b62006-01-06 00:21:06 -08003334}
3335
NeilBrownc5d79ad2008-02-06 01:39:54 -08003336static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3337{
3338 /* check if two start/length pairs overlap */
3339 if (s1+l1 <= s2)
3340 return 0;
3341 if (s2+l2 <= s1)
3342 return 0;
3343 return 1;
3344}
3345
Dan Williamsb522adc2009-03-31 15:00:31 +11003346static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3347{
3348 unsigned long long blocks;
3349 sector_t new;
3350
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003351 if (kstrtoull(buf, 10, &blocks) < 0)
Dan Williamsb522adc2009-03-31 15:00:31 +11003352 return -EINVAL;
3353
3354 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3355 return -EINVAL; /* sector conversion overflow */
3356
3357 new = blocks * 2;
3358 if (new != blocks * 2)
3359 return -EINVAL; /* unsigned long long to sector_t overflow */
3360
3361 *sectors = new;
3362 return 0;
3363}
3364
NeilBrown83303b62006-01-06 00:21:06 -08003365static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003366rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown83303b62006-01-06 00:21:06 -08003367{
NeilBrownfd01b882011-10-11 16:47:53 +11003368 struct mddev *my_mddev = rdev->mddev;
Andre Nolldd8ac332009-03-31 14:33:13 +11003369 sector_t oldsectors = rdev->sectors;
Dan Williamsb522adc2009-03-31 15:00:31 +11003370 sector_t sectors;
NeilBrown27c529b2008-03-04 14:29:33 -08003371
Shaohua Lif2076e72015-10-08 21:54:12 -07003372 if (test_bit(Journal, &rdev->flags))
3373 return -EBUSY;
Dan Williamsb522adc2009-03-31 15:00:31 +11003374 if (strict_blocks_to_sectors(buf, &sectors) < 0)
Neil Brownd7027452008-07-12 10:37:50 +10003375 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10003376 if (rdev->data_offset != rdev->new_data_offset)
3377 return -EINVAL; /* too confusing */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003378 if (my_mddev->pers && rdev->raid_disk >= 0) {
Neil Brownd7027452008-07-12 10:37:50 +10003379 if (my_mddev->persistent) {
Andre Nolldd8ac332009-03-31 14:33:13 +11003380 sectors = super_types[my_mddev->major_version].
3381 rdev_size_change(rdev, sectors);
3382 if (!sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10003383 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003384 } else if (!sectors)
Mike Snitzer77304d22010-11-08 14:39:12 +01003385 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
Andre Nolldd8ac332009-03-31 14:33:13 +11003386 rdev->data_offset;
NeilBrowna6468532013-02-21 14:33:17 +11003387 if (!my_mddev->pers->resize)
3388 /* Cannot change size for RAID0 or Linear etc */
3389 return -EINVAL;
Chris Webb0cd17fe2008-06-28 08:31:46 +10003390 }
Andre Nolldd8ac332009-03-31 14:33:13 +11003391 if (sectors < my_mddev->dev_sectors)
Chris Webb7d3c6f82008-10-13 11:55:11 +11003392 return -EINVAL; /* component must fit device */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003393
Andre Nolldd8ac332009-03-31 14:33:13 +11003394 rdev->sectors = sectors;
3395 if (sectors > oldsectors && my_mddev->external) {
NeilBrown8b1afc32014-09-29 15:33:20 +10003396 /* Need to check that all other rdevs with the same
3397 * ->bdev do not overlap. 'rcu' is sufficient to walk
3398 * the rdev lists safely.
3399 * This check does not provide a hard guarantee, it
3400 * just helps avoid dangerous mistakes.
NeilBrownc5d79ad2008-02-06 01:39:54 -08003401 */
NeilBrownfd01b882011-10-11 16:47:53 +11003402 struct mddev *mddev;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003403 int overlap = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11003404 struct list_head *tmp;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003405
NeilBrown8b1afc32014-09-29 15:33:20 +10003406 rcu_read_lock();
NeilBrown29ac4aa2008-02-06 01:39:58 -08003407 for_each_mddev(mddev, tmp) {
NeilBrown3cb03002011-10-11 16:45:26 +11003408 struct md_rdev *rdev2;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003409
NeilBrowndafb20f2012-03-19 12:46:39 +11003410 rdev_for_each(rdev2, mddev)
NeilBrownf21e9ff2011-01-31 12:10:09 +11003411 if (rdev->bdev == rdev2->bdev &&
3412 rdev != rdev2 &&
3413 overlaps(rdev->data_offset, rdev->sectors,
3414 rdev2->data_offset,
3415 rdev2->sectors)) {
NeilBrownc5d79ad2008-02-06 01:39:54 -08003416 overlap = 1;
3417 break;
3418 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08003419 if (overlap) {
3420 mddev_put(mddev);
3421 break;
3422 }
3423 }
NeilBrown8b1afc32014-09-29 15:33:20 +10003424 rcu_read_unlock();
NeilBrownc5d79ad2008-02-06 01:39:54 -08003425 if (overlap) {
3426 /* Someone else could have slipped in a size
3427 * change here, but doing so is just silly.
Andre Nolldd8ac332009-03-31 14:33:13 +11003428 * We put oldsectors back because we *know* it is
NeilBrownc5d79ad2008-02-06 01:39:54 -08003429 * safe, and trust userspace not to race with
3430 * itself
3431 */
Andre Nolldd8ac332009-03-31 14:33:13 +11003432 rdev->sectors = oldsectors;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003433 return -EBUSY;
3434 }
3435 }
NeilBrown83303b62006-01-06 00:21:06 -08003436 return len;
3437}
3438
3439static struct rdev_sysfs_entry rdev_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07003440__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
NeilBrown83303b62006-01-06 00:21:06 -08003441
NeilBrown3cb03002011-10-11 16:45:26 +11003442static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
Dan Williams06e3c812009-12-12 21:17:12 -07003443{
3444 unsigned long long recovery_start = rdev->recovery_offset;
3445
3446 if (test_bit(In_sync, &rdev->flags) ||
3447 recovery_start == MaxSector)
3448 return sprintf(page, "none\n");
3449
3450 return sprintf(page, "%llu\n", recovery_start);
3451}
3452
NeilBrown3cb03002011-10-11 16:45:26 +11003453static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
Dan Williams06e3c812009-12-12 21:17:12 -07003454{
3455 unsigned long long recovery_start;
3456
3457 if (cmd_match(buf, "none"))
3458 recovery_start = MaxSector;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003459 else if (kstrtoull(buf, 10, &recovery_start))
Dan Williams06e3c812009-12-12 21:17:12 -07003460 return -EINVAL;
3461
3462 if (rdev->mddev->pers &&
3463 rdev->raid_disk >= 0)
3464 return -EBUSY;
3465
3466 rdev->recovery_offset = recovery_start;
3467 if (recovery_start == MaxSector)
3468 set_bit(In_sync, &rdev->flags);
3469 else
3470 clear_bit(In_sync, &rdev->flags);
3471 return len;
3472}
3473
3474static struct rdev_sysfs_entry rdev_recovery_start =
3475__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3476
Vishal Vermafc974ee2015-12-24 19:20:34 -07003477/* sysfs access to bad-blocks list.
3478 * We present two files.
3479 * 'bad-blocks' lists sector numbers and lengths of ranges that
3480 * are recorded as bad. The list is truncated to fit within
3481 * the one-page limit of sysfs.
3482 * Writing "sector length" to this file adds an acknowledged
3483 * bad block list.
3484 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3485 * been acknowledged. Writing to this file adds bad blocks
3486 * without acknowledging them. This is largely for testing.
3487 */
NeilBrown3cb03002011-10-11 16:45:26 +11003488static ssize_t bb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003489{
3490 return badblocks_show(&rdev->badblocks, page, 0);
3491}
NeilBrown3cb03002011-10-11 16:45:26 +11003492static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003493{
NeilBrownde393cd2011-07-28 11:31:48 +10003494 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3495 /* Maybe that ack was all we needed */
3496 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3497 wake_up(&rdev->blocked_wait);
3498 return rv;
NeilBrown16c791a2011-07-28 11:31:47 +10003499}
3500static struct rdev_sysfs_entry rdev_bad_blocks =
3501__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3502
NeilBrown3cb03002011-10-11 16:45:26 +11003503static ssize_t ubb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003504{
3505 return badblocks_show(&rdev->badblocks, page, 1);
3506}
NeilBrown3cb03002011-10-11 16:45:26 +11003507static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003508{
3509 return badblocks_store(&rdev->badblocks, page, len, 1);
3510}
3511static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3512__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3513
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003514static ssize_t
3515ppl_sector_show(struct md_rdev *rdev, char *page)
3516{
3517 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3518}
3519
3520static ssize_t
3521ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3522{
3523 unsigned long long sector;
3524
3525 if (kstrtoull(buf, 10, &sector) < 0)
3526 return -EINVAL;
3527 if (sector != (sector_t)sector)
3528 return -EINVAL;
3529
3530 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3531 rdev->raid_disk >= 0)
3532 return -EBUSY;
3533
3534 if (rdev->mddev->persistent) {
3535 if (rdev->mddev->major_version == 0)
3536 return -EINVAL;
3537 if ((sector > rdev->sb_start &&
3538 sector - rdev->sb_start > S16_MAX) ||
3539 (sector < rdev->sb_start &&
3540 rdev->sb_start - sector > -S16_MIN))
3541 return -EINVAL;
3542 rdev->ppl.offset = sector - rdev->sb_start;
3543 } else if (!rdev->mddev->external) {
3544 return -EBUSY;
3545 }
3546 rdev->ppl.sector = sector;
3547 return len;
3548}
3549
3550static struct rdev_sysfs_entry rdev_ppl_sector =
3551__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3552
3553static ssize_t
3554ppl_size_show(struct md_rdev *rdev, char *page)
3555{
3556 return sprintf(page, "%u\n", rdev->ppl.size);
3557}
3558
3559static ssize_t
3560ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3561{
3562 unsigned int size;
3563
3564 if (kstrtouint(buf, 10, &size) < 0)
3565 return -EINVAL;
3566
3567 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3568 rdev->raid_disk >= 0)
3569 return -EBUSY;
3570
3571 if (rdev->mddev->persistent) {
3572 if (rdev->mddev->major_version == 0)
3573 return -EINVAL;
3574 if (size > U16_MAX)
3575 return -EINVAL;
3576 } else if (!rdev->mddev->external) {
3577 return -EBUSY;
3578 }
3579 rdev->ppl.size = size;
3580 return len;
3581}
3582
3583static struct rdev_sysfs_entry rdev_ppl_size =
3584__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3585
NeilBrown86e6ffd2005-11-08 21:39:24 -08003586static struct attribute *rdev_default_attrs[] = {
3587 &rdev_state.attr,
NeilBrown4dbcdc72006-01-06 00:20:52 -08003588 &rdev_errors.attr,
NeilBrown014236d2006-01-06 00:20:55 -08003589 &rdev_slot.attr,
NeilBrown93c8cad2006-01-06 00:20:56 -08003590 &rdev_offset.attr,
NeilBrownc6563a82012-05-21 09:27:00 +10003591 &rdev_new_offset.attr,
NeilBrown83303b62006-01-06 00:21:06 -08003592 &rdev_size.attr,
Dan Williams06e3c812009-12-12 21:17:12 -07003593 &rdev_recovery_start.attr,
NeilBrown16c791a2011-07-28 11:31:47 +10003594 &rdev_bad_blocks.attr,
3595 &rdev_unack_bad_blocks.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003596 &rdev_ppl_sector.attr,
3597 &rdev_ppl_size.attr,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003598 NULL,
3599};
3600static ssize_t
3601rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3602{
3603 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003604 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003605
3606 if (!entry->show)
3607 return -EIO;
NeilBrown758bfc82014-12-15 12:56:59 +11003608 if (!rdev->mddev)
Marcos Paulo de Souza168b3052019-06-14 15:41:06 -07003609 return -ENODEV;
NeilBrown758bfc82014-12-15 12:56:59 +11003610 return entry->show(rdev, page);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003611}
3612
3613static ssize_t
3614rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3615 const char *page, size_t length)
3616{
3617 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003618 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown27c529b2008-03-04 14:29:33 -08003619 ssize_t rv;
NeilBrownfd01b882011-10-11 16:47:53 +11003620 struct mddev *mddev = rdev->mddev;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003621
3622 if (!entry->store)
3623 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07003624 if (!capable(CAP_SYS_ADMIN))
3625 return -EACCES;
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003626 rv = mddev ? mddev_lock(mddev) : -ENODEV;
NeilBrownca388052008-02-06 01:39:55 -08003627 if (!rv) {
NeilBrown27c529b2008-03-04 14:29:33 -08003628 if (rdev->mddev == NULL)
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003629 rv = -ENODEV;
NeilBrown27c529b2008-03-04 14:29:33 -08003630 else
3631 rv = entry->store(rdev, page, length);
Dan Williams6a518302008-04-30 00:52:28 -07003632 mddev_unlock(mddev);
NeilBrownca388052008-02-06 01:39:55 -08003633 }
3634 return rv;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003635}
3636
3637static void rdev_free(struct kobject *ko)
3638{
NeilBrown3cb03002011-10-11 16:45:26 +11003639 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003640 kfree(rdev);
3641}
Emese Revfy52cf25d2010-01-19 02:58:23 +01003642static const struct sysfs_ops rdev_sysfs_ops = {
NeilBrown86e6ffd2005-11-08 21:39:24 -08003643 .show = rdev_attr_show,
3644 .store = rdev_attr_store,
3645};
3646static struct kobj_type rdev_ktype = {
3647 .release = rdev_free,
3648 .sysfs_ops = &rdev_sysfs_ops,
3649 .default_attrs = rdev_default_attrs,
3650};
3651
NeilBrown3cb03002011-10-11 16:45:26 +11003652int md_rdev_init(struct md_rdev *rdev)
NeilBrowne8bb9a82010-06-01 19:37:26 +10003653{
3654 rdev->desc_nr = -1;
3655 rdev->saved_raid_disk = -1;
3656 rdev->raid_disk = -1;
3657 rdev->flags = 0;
3658 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10003659 rdev->new_data_offset = 0;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003660 rdev->sb_events = 0;
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02003661 rdev->last_read_error = 0;
NeilBrown2699b672011-07-28 11:31:47 +10003662 rdev->sb_loaded = 0;
3663 rdev->bb_page = NULL;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003664 atomic_set(&rdev->nr_pending, 0);
3665 atomic_set(&rdev->read_errors, 0);
3666 atomic_set(&rdev->corrected_errors, 0);
3667
3668 INIT_LIST_HEAD(&rdev->same_set);
3669 init_waitqueue_head(&rdev->blocked_wait);
NeilBrown2230dfe2011-07-28 11:31:46 +10003670
3671 /* Add space to store bad block list.
3672 * This reserves the space even on arrays where it cannot
3673 * be used - I wonder if that matters
3674 */
Vishal Vermafc974ee2015-12-24 19:20:34 -07003675 return badblocks_init(&rdev->badblocks, 0);
NeilBrowne8bb9a82010-06-01 19:37:26 +10003676}
3677EXPORT_SYMBOL_GPL(md_rdev_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678/*
3679 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3680 *
3681 * mark the device faulty if:
3682 *
3683 * - the device is nonexistent (zero size)
3684 * - the device has no valid superblock
3685 *
3686 * a faulty rdev _never_ has rdev->sb set.
3687 */
NeilBrown3cb03002011-10-11 16:45:26 +11003688static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003689{
3690 char b[BDEVNAME_SIZE];
3691 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11003692 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003693 sector_t size;
3694
NeilBrown9ffae0c2006-01-06 00:20:32 -08003695 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
NeilBrown9d487392016-11-02 14:16:49 +11003696 if (!rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003698
NeilBrown2230dfe2011-07-28 11:31:46 +10003699 err = md_rdev_init(rdev);
3700 if (err)
3701 goto abort_free;
3702 err = alloc_disk_sb(rdev);
3703 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704 goto abort_free;
3705
NeilBrownc5d79ad2008-02-06 01:39:54 -08003706 err = lock_rdev(rdev, newdev, super_format == -2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707 if (err)
3708 goto abort_free;
3709
Greg Kroah-Hartmanf9cb0742007-12-17 23:05:35 -07003710 kobject_init(&rdev->kobj, &rdev_ktype);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003711
Mike Snitzer77304d22010-11-08 14:39:12 +01003712 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713 if (!size) {
NeilBrown9d487392016-11-02 14:16:49 +11003714 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003715 bdevname(rdev->bdev,b));
3716 err = -EINVAL;
3717 goto abort_free;
3718 }
3719
3720 if (super_format >= 0) {
3721 err = super_types[super_format].
3722 load_super(rdev, NULL, super_minor);
3723 if (err == -EINVAL) {
NeilBrown9d487392016-11-02 14:16:49 +11003724 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
NeilBrowndf968c42007-07-17 04:06:11 -07003725 bdevname(rdev->bdev,b),
NeilBrown9d487392016-11-02 14:16:49 +11003726 super_format, super_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 goto abort_free;
3728 }
3729 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11003730 pr_warn("md: could not read %s's sb, not importing!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003731 bdevname(rdev->bdev,b));
3732 goto abort_free;
3733 }
3734 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003735
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736 return rdev;
3737
3738abort_free:
NeilBrown2699b672011-07-28 11:31:47 +10003739 if (rdev->bdev)
3740 unlock_rdev(rdev);
NeilBrown545c8792012-05-22 13:54:30 +10003741 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742 kfree(rdev);
3743 return ERR_PTR(err);
3744}
3745
3746/*
3747 * Check a full RAID array for plausibility
3748 */
3749
Yufen Yu6a5cb532019-10-16 16:00:03 +08003750static int analyze_sbs(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751{
3752 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11003753 struct md_rdev *rdev, *freshest, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003754 char b[BDEVNAME_SIZE];
3755
3756 freshest = NULL;
NeilBrowndafb20f2012-03-19 12:46:39 +11003757 rdev_for_each_safe(rdev, tmp, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003758 switch (super_types[mddev->major_version].
3759 load_super(rdev, freshest, mddev->minor_version)) {
3760 case 1:
3761 freshest = rdev;
3762 break;
3763 case 0:
3764 break;
3765 default:
NeilBrown9d487392016-11-02 14:16:49 +11003766 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003768 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003769 }
3770
Yufen Yu6a5cb532019-10-16 16:00:03 +08003771 /* Cannot find a valid fresh disk */
3772 if (!freshest) {
3773 pr_warn("md: cannot find a valid disk\n");
3774 return -EINVAL;
3775 }
3776
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777 super_types[mddev->major_version].
3778 validate_super(mddev, freshest);
3779
3780 i = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11003781 rdev_for_each_safe(rdev, tmp, mddev) {
NeilBrown233fca32010-04-14 17:02:09 +10003782 if (mddev->max_disks &&
3783 (rdev->desc_nr >= mddev->max_disks ||
3784 i > mddev->max_disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11003785 pr_warn("md: %s: %s: only %d devices permitted\n",
3786 mdname(mddev), bdevname(rdev->bdev, b),
3787 mddev->max_disks);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003788 md_kick_rdev_from_array(rdev);
NeilBrownde01dfa2009-02-06 18:02:46 +11003789 continue;
3790 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003791 if (rdev != freshest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792 if (super_types[mddev->major_version].
3793 validate_super(mddev, rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003794 pr_warn("md: kicking non-fresh %s from array!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003796 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 continue;
3798 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003799 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800 if (mddev->level == LEVEL_MULTIPATH) {
3801 rdev->desc_nr = i++;
3802 rdev->raid_disk = rdev->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08003803 set_bit(In_sync, &rdev->flags);
Shaohua Lif2076e72015-10-08 21:54:12 -07003804 } else if (rdev->raid_disk >=
3805 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3806 !test_bit(Journal, &rdev->flags)) {
NeilBrowna778b732007-05-23 13:58:10 -07003807 rdev->raid_disk = -1;
3808 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809 }
3810 }
Yufen Yu6a5cb532019-10-16 16:00:03 +08003811
3812 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813}
3814
NeilBrown72e02072009-12-14 12:49:55 +11003815/* Read a fixed-point number.
3816 * Numbers in sysfs attributes should be in "standard" units where
3817 * possible, so time should be in seconds.
NeilBrownf72ffdd2014-09-30 14:23:59 +10003818 * However we internally use a a much smaller unit such as
NeilBrown72e02072009-12-14 12:49:55 +11003819 * milliseconds or jiffies.
3820 * This function takes a decimal number with a possible fractional
3821 * component, and produces an integer which is the result of
3822 * multiplying that number by 10^'scale'.
3823 * all without any floating-point arithmetic.
3824 */
3825int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3826{
3827 unsigned long result = 0;
3828 long decimals = -1;
3829 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3830 if (*cp == '.')
3831 decimals = 0;
3832 else if (decimals < scale) {
3833 unsigned int value;
3834 value = *cp - '0';
3835 result = result * 10 + value;
3836 if (decimals >= 0)
3837 decimals++;
3838 }
3839 cp++;
3840 }
3841 if (*cp == '\n')
3842 cp++;
3843 if (*cp)
3844 return -EINVAL;
3845 if (decimals < 0)
3846 decimals = 0;
Andy Shevchenkocf891602019-07-23 23:41:55 +03003847 *res = result * int_pow(10, scale - decimals);
NeilBrown72e02072009-12-14 12:49:55 +11003848 return 0;
3849}
3850
NeilBrowneae17012005-11-08 21:39:23 -08003851static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003852safe_delay_show(struct mddev *mddev, char *page)
NeilBrown16f17b32006-06-26 00:27:37 -07003853{
3854 int msec = (mddev->safemode_delay*1000)/HZ;
3855 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3856}
3857static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003858safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
NeilBrown16f17b32006-06-26 00:27:37 -07003859{
NeilBrown16f17b32006-06-26 00:27:37 -07003860 unsigned long msec;
Dan Williams97ce0a72008-09-24 22:48:19 -07003861
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003862 if (mddev_is_clustered(mddev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003863 pr_warn("md: Safemode is disabled for clustered mode\n");
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003864 return -EINVAL;
3865 }
3866
NeilBrown72e02072009-12-14 12:49:55 +11003867 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
NeilBrown16f17b32006-06-26 00:27:37 -07003868 return -EINVAL;
NeilBrown16f17b32006-06-26 00:27:37 -07003869 if (msec == 0)
3870 mddev->safemode_delay = 0;
3871 else {
NeilBrown19052c02008-08-05 15:54:13 +10003872 unsigned long old_delay = mddev->safemode_delay;
NeilBrown1b30e662014-12-15 12:57:00 +11003873 unsigned long new_delay = (msec*HZ)/1000;
3874
3875 if (new_delay == 0)
3876 new_delay = 1;
3877 mddev->safemode_delay = new_delay;
3878 if (new_delay < old_delay || old_delay == 0)
3879 mod_timer(&mddev->safemode_timer, jiffies+1);
NeilBrown16f17b32006-06-26 00:27:37 -07003880 }
3881 return len;
3882}
3883static struct md_sysfs_entry md_safe_delay =
NeilBrown80ca3a42006-07-10 04:44:18 -07003884__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
NeilBrown16f17b32006-06-26 00:27:37 -07003885
3886static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003887level_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08003888{
NeilBrown36d091f2014-12-15 12:56:58 +11003889 struct md_personality *p;
3890 int ret;
3891 spin_lock(&mddev->lock);
3892 p = mddev->pers;
NeilBrownd9d166c2006-01-06 00:20:51 -08003893 if (p)
NeilBrown36d091f2014-12-15 12:56:58 +11003894 ret = sprintf(page, "%s\n", p->name);
NeilBrownd9d166c2006-01-06 00:20:51 -08003895 else if (mddev->clevel[0])
NeilBrown36d091f2014-12-15 12:56:58 +11003896 ret = sprintf(page, "%s\n", mddev->clevel);
NeilBrownd9d166c2006-01-06 00:20:51 -08003897 else if (mddev->level != LEVEL_NONE)
NeilBrown36d091f2014-12-15 12:56:58 +11003898 ret = sprintf(page, "%d\n", mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08003899 else
NeilBrown36d091f2014-12-15 12:56:58 +11003900 ret = 0;
3901 spin_unlock(&mddev->lock);
3902 return ret;
NeilBrowneae17012005-11-08 21:39:23 -08003903}
3904
NeilBrownd9d166c2006-01-06 00:20:51 -08003905static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003906level_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd9d166c2006-01-06 00:20:51 -08003907{
Dan Williamsf2859af2010-05-02 10:04:16 -07003908 char clevel[16];
NeilBrown67918752014-12-15 12:57:01 +11003909 ssize_t rv;
3910 size_t slen = len;
NeilBrowndb721d32014-12-15 12:56:58 +11003911 struct md_personality *pers, *oldpers;
Dan Williamsf2859af2010-05-02 10:04:16 -07003912 long level;
NeilBrowndb721d32014-12-15 12:56:58 +11003913 void *priv, *oldpriv;
NeilBrown3cb03002011-10-11 16:45:26 +11003914 struct md_rdev *rdev;
NeilBrown245f46c2009-03-31 14:39:39 +11003915
NeilBrown67918752014-12-15 12:57:01 +11003916 if (slen == 0 || slen >= sizeof(clevel))
3917 return -EINVAL;
3918
3919 rv = mddev_lock(mddev);
3920 if (rv)
NeilBrown245f46c2009-03-31 14:39:39 +11003921 return rv;
NeilBrown67918752014-12-15 12:57:01 +11003922
3923 if (mddev->pers == NULL) {
3924 strncpy(mddev->clevel, buf, slen);
3925 if (mddev->clevel[slen-1] == '\n')
3926 slen--;
3927 mddev->clevel[slen] = 0;
3928 mddev->level = LEVEL_NONE;
3929 rv = len;
3930 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003931 }
NeilBrown67918752014-12-15 12:57:01 +11003932 rv = -EROFS;
NeilBrownbd8839e2014-05-28 13:39:21 +10003933 if (mddev->ro)
NeilBrown67918752014-12-15 12:57:01 +11003934 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003935
3936 /* request to change the personality. Need to ensure:
3937 * - array is not engaged in resync/recovery/reshape
3938 * - old personality can be suspended
3939 * - new personality will access other array.
3940 */
3941
NeilBrown67918752014-12-15 12:57:01 +11003942 rv = -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10003943 if (mddev->sync_thread ||
NeilBrownf851b602014-12-11 10:02:10 +11003944 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
NeilBrownbb4f1e92010-08-08 21:18:03 +10003945 mddev->reshape_position != MaxSector ||
3946 mddev->sysfs_active)
NeilBrown67918752014-12-15 12:57:01 +11003947 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003948
NeilBrown67918752014-12-15 12:57:01 +11003949 rv = -EINVAL;
NeilBrown245f46c2009-03-31 14:39:39 +11003950 if (!mddev->pers->quiesce) {
NeilBrown9d487392016-11-02 14:16:49 +11003951 pr_warn("md: %s: %s does not support online personality change\n",
3952 mdname(mddev), mddev->pers->name);
NeilBrown67918752014-12-15 12:57:01 +11003953 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003954 }
3955
3956 /* Now find the new personality */
NeilBrown67918752014-12-15 12:57:01 +11003957 strncpy(clevel, buf, slen);
3958 if (clevel[slen-1] == '\n')
3959 slen--;
3960 clevel[slen] = 0;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003961 if (kstrtol(clevel, 10, &level))
Dan Williamsf2859af2010-05-02 10:04:16 -07003962 level = LEVEL_NONE;
NeilBrown245f46c2009-03-31 14:39:39 +11003963
Dan Williamsf2859af2010-05-02 10:04:16 -07003964 if (request_module("md-%s", clevel) != 0)
3965 request_module("md-level-%s", clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003966 spin_lock(&pers_lock);
Dan Williamsf2859af2010-05-02 10:04:16 -07003967 pers = find_pers(level, clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003968 if (!pers || !try_module_get(pers->owner)) {
3969 spin_unlock(&pers_lock);
NeilBrown9d487392016-11-02 14:16:49 +11003970 pr_warn("md: personality %s not loaded\n", clevel);
NeilBrown67918752014-12-15 12:57:01 +11003971 rv = -EINVAL;
3972 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003973 }
3974 spin_unlock(&pers_lock);
3975
3976 if (pers == mddev->pers) {
3977 /* Nothing to do! */
3978 module_put(pers->owner);
NeilBrown67918752014-12-15 12:57:01 +11003979 rv = len;
3980 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003981 }
3982 if (!pers->takeover) {
3983 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11003984 pr_warn("md: %s: %s does not support personality takeover\n",
3985 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11003986 rv = -EINVAL;
3987 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003988 }
3989
NeilBrowndafb20f2012-03-19 12:46:39 +11003990 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01003991 rdev->new_raid_disk = rdev->raid_disk;
3992
NeilBrown245f46c2009-03-31 14:39:39 +11003993 /* ->takeover must set new_* and/or delta_disks
3994 * if it succeeds, and may set them when it fails.
3995 */
3996 priv = pers->takeover(mddev);
3997 if (IS_ERR(priv)) {
3998 mddev->new_level = mddev->level;
3999 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10004000 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown245f46c2009-03-31 14:39:39 +11004001 mddev->raid_disks -= mddev->delta_disks;
4002 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10004003 mddev->reshape_backwards = 0;
NeilBrown245f46c2009-03-31 14:39:39 +11004004 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004005 pr_warn("md: %s: %s would not accept array\n",
4006 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004007 rv = PTR_ERR(priv);
4008 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004009 }
4010
4011 /* Looks like we have a winner */
4012 mddev_suspend(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11004013 mddev_detach(mddev);
NeilBrown36d091f2014-12-15 12:56:58 +11004014
4015 spin_lock(&mddev->lock);
NeilBrowndb721d32014-12-15 12:56:58 +11004016 oldpers = mddev->pers;
4017 oldpriv = mddev->private;
4018 mddev->pers = pers;
4019 mddev->private = priv;
4020 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4021 mddev->level = mddev->new_level;
4022 mddev->layout = mddev->new_layout;
4023 mddev->chunk_sectors = mddev->new_chunk_sectors;
4024 mddev->delta_disks = 0;
4025 mddev->reshape_backwards = 0;
4026 mddev->degraded = 0;
NeilBrown36d091f2014-12-15 12:56:58 +11004027 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10004028
NeilBrowndb721d32014-12-15 12:56:58 +11004029 if (oldpers->sync_request == NULL &&
Trela Maciej54071b32010-03-08 16:02:42 +11004030 mddev->external) {
4031 /* We are converting from a no-redundancy array
4032 * to a redundancy array and metadata is managed
4033 * externally so we need to be sure that writes
4034 * won't block due to a need to transition
4035 * clean->dirty
4036 * until external management is started.
4037 */
4038 mddev->in_sync = 0;
4039 mddev->safemode_delay = 0;
4040 mddev->safemode = 0;
4041 }
4042
NeilBrowndb721d32014-12-15 12:56:58 +11004043 oldpers->free(mddev, oldpriv);
4044
4045 if (oldpers->sync_request == NULL &&
4046 pers->sync_request != NULL) {
4047 /* need to add the md_redundancy_group */
4048 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11004049 pr_warn("md: cannot register extra attributes for %s\n",
4050 mdname(mddev));
NeilBrowndb721d32014-12-15 12:56:58 +11004051 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07004052 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4053 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrowndb721d32014-12-15 12:56:58 +11004054 }
4055 if (oldpers->sync_request != NULL &&
4056 pers->sync_request == NULL) {
4057 /* need to remove the md_redundancy_group */
4058 if (mddev->to_remove == NULL)
4059 mddev->to_remove = &md_redundancy_group;
4060 }
4061
Alexey Obitotskiy4cb9da72016-06-23 12:11:01 +02004062 module_put(oldpers->owner);
4063
NeilBrowndafb20f2012-03-19 12:46:39 +11004064 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004065 if (rdev->raid_disk < 0)
4066 continue;
NeilBrownbf2cb0d2011-01-14 09:14:34 +11004067 if (rdev->new_raid_disk >= mddev->raid_disks)
NeilBrowne93f68a2010-06-15 09:36:03 +01004068 rdev->new_raid_disk = -1;
4069 if (rdev->new_raid_disk == rdev->raid_disk)
4070 continue;
Namhyung Kim36fad852011-07-27 11:00:36 +10004071 sysfs_unlink_rdev(mddev, rdev);
NeilBrowne93f68a2010-06-15 09:36:03 +01004072 }
NeilBrowndafb20f2012-03-19 12:46:39 +11004073 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004074 if (rdev->raid_disk < 0)
4075 continue;
4076 if (rdev->new_raid_disk == rdev->raid_disk)
4077 continue;
4078 rdev->raid_disk = rdev->new_raid_disk;
4079 if (rdev->raid_disk < 0)
NeilBrown3a981b02009-08-03 10:59:55 +10004080 clear_bit(In_sync, &rdev->flags);
NeilBrowne93f68a2010-06-15 09:36:03 +01004081 else {
Namhyung Kim36fad852011-07-27 11:00:36 +10004082 if (sysfs_link_rdev(mddev, rdev))
NeilBrown9d487392016-11-02 14:16:49 +11004083 pr_warn("md: cannot register rd%d for %s after level change\n",
4084 rdev->raid_disk, mdname(mddev));
NeilBrown3a981b02009-08-03 10:59:55 +10004085 }
NeilBrowne93f68a2010-06-15 09:36:03 +01004086 }
4087
NeilBrowndb721d32014-12-15 12:56:58 +11004088 if (pers->sync_request == NULL) {
Trela, Maciej9af204c2010-03-08 16:02:44 +11004089 /* this is now an array without redundancy, so
4090 * it must always be in_sync
4091 */
4092 mddev->in_sync = 1;
4093 del_timer_sync(&mddev->safemode_timer);
4094 }
NeilBrown02e5f5c2013-11-14 15:16:15 +11004095 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown245f46c2009-03-31 14:39:39 +11004096 pers->run(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004097 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassow47525e52012-05-22 13:55:29 +10004098 mddev_resume(mddev);
NeilBrown830778a2014-01-14 15:17:03 +11004099 if (!mddev->thread)
4100 md_update_sb(mddev, 1);
Junxiao Bie1a86db2020-07-14 16:10:26 -07004101 sysfs_notify_dirent_safe(mddev->sysfs_level);
Dan Williamsbb7f8d22010-05-01 18:14:57 -07004102 md_new_event(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004103 rv = len;
4104out_unlock:
4105 mddev_unlock(mddev);
NeilBrownd9d166c2006-01-06 00:20:51 -08004106 return rv;
4107}
4108
4109static struct md_sysfs_entry md_level =
NeilBrown80ca3a42006-07-10 04:44:18 -07004110__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
NeilBrowneae17012005-11-08 21:39:23 -08004111
NeilBrownd4dbd022006-06-26 00:27:59 -07004112static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004113layout_show(struct mddev *mddev, char *page)
NeilBrownd4dbd022006-06-26 00:27:59 -07004114{
4115 /* just a number, not meaningful for all levels */
NeilBrown08a02ec2007-05-09 02:35:38 -07004116 if (mddev->reshape_position != MaxSector &&
4117 mddev->layout != mddev->new_layout)
4118 return sprintf(page, "%d (%d)\n",
4119 mddev->new_layout, mddev->layout);
NeilBrownd4dbd022006-06-26 00:27:59 -07004120 return sprintf(page, "%d\n", mddev->layout);
4121}
4122
4123static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004124layout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd4dbd022006-06-26 00:27:59 -07004125{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004126 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004127 int err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004128
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004129 err = kstrtouint(buf, 10, &n);
4130 if (err < 0)
4131 return err;
NeilBrown67918752014-12-15 12:57:01 +11004132 err = mddev_lock(mddev);
4133 if (err)
4134 return err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004135
NeilBrownb3546032009-03-31 14:56:41 +11004136 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004137 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004138 err = -EBUSY;
4139 else if (mddev->ro)
4140 err = -EROFS;
4141 else {
4142 mddev->new_layout = n;
4143 err = mddev->pers->check_reshape(mddev);
4144 if (err)
4145 mddev->new_layout = mddev->layout;
NeilBrown597a7112009-06-18 08:47:42 +10004146 }
NeilBrownb3546032009-03-31 14:56:41 +11004147 } else {
NeilBrown08a02ec2007-05-09 02:35:38 -07004148 mddev->new_layout = n;
NeilBrownb3546032009-03-31 14:56:41 +11004149 if (mddev->reshape_position == MaxSector)
4150 mddev->layout = n;
4151 }
NeilBrown67918752014-12-15 12:57:01 +11004152 mddev_unlock(mddev);
4153 return err ?: len;
NeilBrownd4dbd022006-06-26 00:27:59 -07004154}
4155static struct md_sysfs_entry md_layout =
NeilBrown80ca3a42006-07-10 04:44:18 -07004156__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
NeilBrownd4dbd022006-06-26 00:27:59 -07004157
NeilBrowneae17012005-11-08 21:39:23 -08004158static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004159raid_disks_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08004160{
NeilBrownbb636542005-11-08 21:39:45 -08004161 if (mddev->raid_disks == 0)
4162 return 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004163 if (mddev->reshape_position != MaxSector &&
4164 mddev->delta_disks != 0)
4165 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4166 mddev->raid_disks - mddev->delta_disks);
NeilBrowneae17012005-11-08 21:39:23 -08004167 return sprintf(page, "%d\n", mddev->raid_disks);
4168}
4169
NeilBrownfd01b882011-10-11 16:47:53 +11004170static int update_raid_disks(struct mddev *mddev, int raid_disks);
NeilBrownda943b992006-01-06 00:20:54 -08004171
4172static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004173raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownda943b992006-01-06 00:20:54 -08004174{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004175 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004176 int err;
NeilBrownda943b992006-01-06 00:20:54 -08004177
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004178 err = kstrtouint(buf, 10, &n);
4179 if (err < 0)
4180 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004181
NeilBrown67918752014-12-15 12:57:01 +11004182 err = mddev_lock(mddev);
4183 if (err)
4184 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004185 if (mddev->pers)
NeilBrown67918752014-12-15 12:57:01 +11004186 err = update_raid_disks(mddev, n);
NeilBrown08a02ec2007-05-09 02:35:38 -07004187 else if (mddev->reshape_position != MaxSector) {
NeilBrownc6563a82012-05-21 09:27:00 +10004188 struct md_rdev *rdev;
NeilBrown08a02ec2007-05-09 02:35:38 -07004189 int olddisks = mddev->raid_disks - mddev->delta_disks;
NeilBrownc6563a82012-05-21 09:27:00 +10004190
NeilBrown67918752014-12-15 12:57:01 +11004191 err = -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10004192 rdev_for_each(rdev, mddev) {
4193 if (olddisks < n &&
4194 rdev->data_offset < rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004195 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004196 if (olddisks > n &&
4197 rdev->data_offset > rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004198 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004199 }
NeilBrown67918752014-12-15 12:57:01 +11004200 err = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004201 mddev->delta_disks = n - olddisks;
4202 mddev->raid_disks = n;
NeilBrown2c810cd2012-05-21 09:27:00 +10004203 mddev->reshape_backwards = (mddev->delta_disks < 0);
NeilBrown08a02ec2007-05-09 02:35:38 -07004204 } else
NeilBrownda943b992006-01-06 00:20:54 -08004205 mddev->raid_disks = n;
NeilBrown67918752014-12-15 12:57:01 +11004206out_unlock:
4207 mddev_unlock(mddev);
4208 return err ? err : len;
NeilBrownda943b992006-01-06 00:20:54 -08004209}
4210static struct md_sysfs_entry md_raid_disks =
NeilBrown80ca3a42006-07-10 04:44:18 -07004211__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
NeilBrowneae17012005-11-08 21:39:23 -08004212
NeilBrown24dd4692005-11-08 21:39:26 -08004213static ssize_t
Sebastian Parschauerec164d072020-07-28 12:01:39 +02004214uuid_show(struct mddev *mddev, char *page)
4215{
4216 return sprintf(page, "%pU\n", mddev->uuid);
4217}
4218static struct md_sysfs_entry md_uuid =
4219__ATTR(uuid, S_IRUGO, uuid_show, NULL);
4220
4221static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004222chunk_size_show(struct mddev *mddev, char *page)
NeilBrown3b343802006-01-06 00:20:47 -08004223{
NeilBrown08a02ec2007-05-09 02:35:38 -07004224 if (mddev->reshape_position != MaxSector &&
Andre Noll664e7c42009-06-18 08:45:27 +10004225 mddev->chunk_sectors != mddev->new_chunk_sectors)
4226 return sprintf(page, "%d (%d)\n",
4227 mddev->new_chunk_sectors << 9,
Andre Noll9d8f0362009-06-18 08:45:01 +10004228 mddev->chunk_sectors << 9);
4229 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
NeilBrown3b343802006-01-06 00:20:47 -08004230}
4231
4232static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004233chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown3b343802006-01-06 00:20:47 -08004234{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004235 unsigned long n;
NeilBrown67918752014-12-15 12:57:01 +11004236 int err;
NeilBrown3b343802006-01-06 00:20:47 -08004237
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004238 err = kstrtoul(buf, 10, &n);
4239 if (err < 0)
4240 return err;
NeilBrown3b343802006-01-06 00:20:47 -08004241
NeilBrown67918752014-12-15 12:57:01 +11004242 err = mddev_lock(mddev);
4243 if (err)
4244 return err;
NeilBrownb3546032009-03-31 14:56:41 +11004245 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004246 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004247 err = -EBUSY;
4248 else if (mddev->ro)
4249 err = -EROFS;
4250 else {
4251 mddev->new_chunk_sectors = n >> 9;
4252 err = mddev->pers->check_reshape(mddev);
4253 if (err)
4254 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown597a7112009-06-18 08:47:42 +10004255 }
NeilBrownb3546032009-03-31 14:56:41 +11004256 } else {
Andre Noll664e7c42009-06-18 08:45:27 +10004257 mddev->new_chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004258 if (mddev->reshape_position == MaxSector)
Andre Noll9d8f0362009-06-18 08:45:01 +10004259 mddev->chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004260 }
NeilBrown67918752014-12-15 12:57:01 +11004261 mddev_unlock(mddev);
4262 return err ?: len;
NeilBrown3b343802006-01-06 00:20:47 -08004263}
4264static struct md_sysfs_entry md_chunk_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004265__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
NeilBrown3b343802006-01-06 00:20:47 -08004266
NeilBrowna94213b2006-06-26 00:28:00 -07004267static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004268resync_start_show(struct mddev *mddev, char *page)
NeilBrowna94213b2006-06-26 00:28:00 -07004269{
NeilBrownd1a7c502009-03-31 15:24:32 +11004270 if (mddev->recovery_cp == MaxSector)
4271 return sprintf(page, "none\n");
NeilBrowna94213b2006-06-26 00:28:00 -07004272 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4273}
4274
4275static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004276resync_start_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna94213b2006-06-26 00:28:00 -07004277{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004278 unsigned long long n;
NeilBrown67918752014-12-15 12:57:01 +11004279 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004280
4281 if (cmd_match(buf, "none"))
4282 n = MaxSector;
4283 else {
4284 err = kstrtoull(buf, 10, &n);
4285 if (err < 0)
4286 return err;
4287 if (n != (sector_t)n)
4288 return -EINVAL;
4289 }
NeilBrowna94213b2006-06-26 00:28:00 -07004290
NeilBrown67918752014-12-15 12:57:01 +11004291 err = mddev_lock(mddev);
4292 if (err)
4293 return err;
NeilBrownb0986362011-05-11 15:52:21 +10004294 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrown67918752014-12-15 12:57:01 +11004295 err = -EBUSY;
NeilBrowna94213b2006-06-26 00:28:00 -07004296
NeilBrown67918752014-12-15 12:57:01 +11004297 if (!err) {
4298 mddev->recovery_cp = n;
4299 if (mddev->pers)
Shaohua Li29530792016-12-08 15:48:19 -08004300 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
NeilBrown67918752014-12-15 12:57:01 +11004301 }
4302 mddev_unlock(mddev);
4303 return err ?: len;
NeilBrowna94213b2006-06-26 00:28:00 -07004304}
4305static struct md_sysfs_entry md_resync_start =
NeilBrown750f1992014-09-30 08:53:05 +10004306__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4307 resync_start_show, resync_start_store);
NeilBrowna94213b2006-06-26 00:28:00 -07004308
NeilBrown9e653b62006-06-26 00:27:58 -07004309/*
4310 * The array state can be:
4311 *
4312 * clear
4313 * No devices, no size, no level
4314 * Equivalent to STOP_ARRAY ioctl
4315 * inactive
4316 * May have some settings, but array is not active
4317 * all IO results in error
4318 * When written, doesn't tear down array, but just stops it
4319 * suspended (not supported yet)
4320 * All IO requests will block. The array can be reconfigured.
Andre Noll910d8cb2008-03-25 21:00:53 +01004321 * Writing this, if accepted, will block until array is quiescent
NeilBrown9e653b62006-06-26 00:27:58 -07004322 * readonly
4323 * no resync can happen. no superblocks get written.
4324 * write requests fail
4325 * read-auto
4326 * like readonly, but behaves like 'clean' on a write request.
4327 *
4328 * clean - no pending writes, but otherwise active.
4329 * When written to inactive array, starts without resync
4330 * If a write request arrives then
4331 * if metadata is known, mark 'dirty' and switch to 'active'.
4332 * if not known, block and switch to write-pending
4333 * If written to an active array that has pending writes, then fails.
4334 * active
4335 * fully active: IO and resync can be happening.
4336 * When written to inactive array, starts with resync
4337 *
4338 * write-pending
4339 * clean, but writes are blocked waiting for 'active' to be written.
4340 *
4341 * active-idle
4342 * like active, but no writes have been seen for a while (100msec).
4343 *
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004344 * broken
4345 * RAID0/LINEAR-only: same as clean, but array is missing a member.
4346 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
4347 * when a member is gone, so this state will at least alert the
4348 * user that something is wrong.
NeilBrown9e653b62006-06-26 00:27:58 -07004349 */
4350enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004351 write_pending, active_idle, broken, bad_word};
Adrian Bunk05381952006-06-26 00:28:01 -07004352static char *array_states[] = {
NeilBrown9e653b62006-06-26 00:27:58 -07004353 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004354 "write-pending", "active-idle", "broken", NULL };
NeilBrown9e653b62006-06-26 00:27:58 -07004355
4356static int match_word(const char *word, char **list)
4357{
4358 int n;
4359 for (n=0; list[n]; n++)
4360 if (cmd_match(word, list[n]))
4361 break;
4362 return n;
4363}
4364
4365static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004366array_state_show(struct mddev *mddev, char *page)
NeilBrown9e653b62006-06-26 00:27:58 -07004367{
4368 enum array_state st = inactive;
4369
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004370 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
NeilBrown9e653b62006-06-26 00:27:58 -07004371 switch(mddev->ro) {
4372 case 1:
4373 st = readonly;
4374 break;
4375 case 2:
4376 st = read_auto;
4377 break;
4378 case 0:
NeilBrown55cc39f2017-03-15 14:05:14 +11004379 spin_lock(&mddev->lock);
Shaohua Li29530792016-12-08 15:48:19 -08004380 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrowne6910632008-02-06 01:39:51 -08004381 st = write_pending;
Tomasz Majchrzak16f88942016-10-24 12:47:28 +02004382 else if (mddev->in_sync)
4383 st = clean;
NeilBrown9e653b62006-06-26 00:27:58 -07004384 else if (mddev->safemode)
4385 st = active_idle;
4386 else
4387 st = active;
NeilBrown55cc39f2017-03-15 14:05:14 +11004388 spin_unlock(&mddev->lock);
NeilBrown9e653b62006-06-26 00:27:58 -07004389 }
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004390
4391 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4392 st = broken;
4393 } else {
NeilBrown9e653b62006-06-26 00:27:58 -07004394 if (list_empty(&mddev->disks) &&
4395 mddev->raid_disks == 0 &&
Andre Noll58c0fed2009-03-31 14:33:13 +11004396 mddev->dev_sectors == 0)
NeilBrown9e653b62006-06-26 00:27:58 -07004397 st = clear;
4398 else
4399 st = inactive;
4400 }
4401 return sprintf(page, "%s\n", array_states[st]);
4402}
4403
NeilBrownf72ffdd2014-09-30 14:23:59 +10004404static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4405static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
NeilBrownfd01b882011-10-11 16:47:53 +11004406static int restart_array(struct mddev *mddev);
NeilBrown9e653b62006-06-26 00:27:58 -07004407
4408static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004409array_state_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown9e653b62006-06-26 00:27:58 -07004410{
NeilBrown6497709b2017-03-15 14:05:14 +11004411 int err = 0;
NeilBrown9e653b62006-06-26 00:27:58 -07004412 enum array_state st = match_word(buf, array_states);
NeilBrown67918752014-12-15 12:57:01 +11004413
4414 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4415 /* don't take reconfig_mutex when toggling between
4416 * clean and active
4417 */
4418 spin_lock(&mddev->lock);
4419 if (st == active) {
4420 restart_array(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004421 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
Tomasz Majchrzak91a6c4a2016-10-25 17:07:08 +02004422 md_wakeup_thread(mddev->thread);
NeilBrown67918752014-12-15 12:57:01 +11004423 wake_up(&mddev->sb_wait);
NeilBrown67918752014-12-15 12:57:01 +11004424 } else /* st == clean */ {
4425 restart_array(mddev);
NeilBrown6497709b2017-03-15 14:05:14 +11004426 if (!set_in_sync(mddev))
NeilBrown67918752014-12-15 12:57:01 +11004427 err = -EBUSY;
4428 }
Tomasz Majchrzak573275b2016-06-30 10:47:09 +02004429 if (!err)
4430 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown67918752014-12-15 12:57:01 +11004431 spin_unlock(&mddev->lock);
NeilBrownc008f1d2015-06-12 19:46:44 +10004432 return err ?: len;
NeilBrown67918752014-12-15 12:57:01 +11004433 }
4434 err = mddev_lock(mddev);
4435 if (err)
4436 return err;
4437 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004438 switch(st) {
4439 case bad_word:
4440 break;
4441 case clear:
4442 /* stopping an active array */
NeilBrowna05b7ea2012-07-19 15:59:18 +10004443 err = do_md_stop(mddev, 0, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004444 break;
4445 case inactive:
4446 /* stopping an active array */
NeilBrown90cf1952012-07-31 10:04:55 +10004447 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004448 err = do_md_stop(mddev, 2, NULL);
NeilBrown90cf1952012-07-31 10:04:55 +10004449 else
NeilBrowne6910632008-02-06 01:39:51 -08004450 err = 0; /* already inactive */
NeilBrown9e653b62006-06-26 00:27:58 -07004451 break;
4452 case suspended:
4453 break; /* not supported yet */
4454 case readonly:
4455 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004456 err = md_set_readonly(mddev, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004457 else {
4458 mddev->ro = 1;
NeilBrown648b6292008-04-30 00:52:30 -07004459 set_disk_ro(mddev->gendisk, 1);
NeilBrown9e653b62006-06-26 00:27:58 -07004460 err = do_md_run(mddev);
4461 }
4462 break;
4463 case read_auto:
NeilBrown9e653b62006-06-26 00:27:58 -07004464 if (mddev->pers) {
NeilBrown80268ee2008-10-13 11:55:12 +11004465 if (mddev->ro == 0)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004466 err = md_set_readonly(mddev, NULL);
NeilBrown80268ee2008-10-13 11:55:12 +11004467 else if (mddev->ro == 1)
NeilBrown648b6292008-04-30 00:52:30 -07004468 err = restart_array(mddev);
4469 if (err == 0) {
4470 mddev->ro = 2;
4471 set_disk_ro(mddev->gendisk, 0);
4472 }
NeilBrown9e653b62006-06-26 00:27:58 -07004473 } else {
4474 mddev->ro = 2;
4475 err = do_md_run(mddev);
4476 }
4477 break;
4478 case clean:
4479 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004480 err = restart_array(mddev);
4481 if (err)
4482 break;
NeilBrown85572d72014-12-15 12:56:56 +11004483 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11004484 if (!set_in_sync(mddev))
NeilBrowne6910632008-02-06 01:39:51 -08004485 err = -EBUSY;
NeilBrown85572d72014-12-15 12:56:56 +11004486 spin_unlock(&mddev->lock);
NeilBrown5bf29592009-05-07 12:50:57 +10004487 } else
4488 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004489 break;
4490 case active:
4491 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004492 err = restart_array(mddev);
4493 if (err)
4494 break;
Shaohua Li29530792016-12-08 15:48:19 -08004495 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown9e653b62006-06-26 00:27:58 -07004496 wake_up(&mddev->sb_wait);
4497 err = 0;
4498 } else {
4499 mddev->ro = 0;
NeilBrown648b6292008-04-30 00:52:30 -07004500 set_disk_ro(mddev->gendisk, 0);
NeilBrown9e653b62006-06-26 00:27:58 -07004501 err = do_md_run(mddev);
4502 }
4503 break;
4504 case write_pending:
4505 case active_idle:
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004506 case broken:
NeilBrown9e653b62006-06-26 00:27:58 -07004507 /* these cannot be set */
4508 break;
4509 }
NeilBrown67918752014-12-15 12:57:01 +11004510
4511 if (!err) {
NeilBrown1d23f172011-12-08 15:49:12 +11004512 if (mddev->hold_active == UNTIL_IOCTL)
4513 mddev->hold_active = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10004514 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10004515 }
NeilBrown67918752014-12-15 12:57:01 +11004516 mddev_unlock(mddev);
4517 return err ?: len;
NeilBrown9e653b62006-06-26 00:27:58 -07004518}
NeilBrown80ca3a42006-07-10 04:44:18 -07004519static struct md_sysfs_entry md_array_state =
NeilBrown750f1992014-09-30 08:53:05 +10004520__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
NeilBrown9e653b62006-06-26 00:27:58 -07004521
NeilBrown6d7ff7382006-01-06 00:21:16 -08004522static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004523max_corrected_read_errors_show(struct mddev *mddev, char *page) {
Robert Becker1e509152009-12-14 12:49:58 +11004524 return sprintf(page, "%d\n",
4525 atomic_read(&mddev->max_corr_read_errors));
4526}
4527
4528static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004529max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
Robert Becker1e509152009-12-14 12:49:58 +11004530{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004531 unsigned int n;
4532 int rv;
Robert Becker1e509152009-12-14 12:49:58 +11004533
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004534 rv = kstrtouint(buf, 10, &n);
4535 if (rv < 0)
4536 return rv;
4537 atomic_set(&mddev->max_corr_read_errors, n);
4538 return len;
Robert Becker1e509152009-12-14 12:49:58 +11004539}
4540
4541static struct md_sysfs_entry max_corr_read_errors =
4542__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4543 max_corrected_read_errors_store);
4544
4545static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004546null_show(struct mddev *mddev, char *page)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004547{
4548 return -EINVAL;
4549}
4550
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004551/* need to ensure rdev_delayed_delete() has completed */
4552static void flush_rdev_wq(struct mddev *mddev)
4553{
4554 struct md_rdev *rdev;
4555
4556 rcu_read_lock();
4557 rdev_for_each_rcu(rdev, mddev)
4558 if (work_pending(&rdev->del_work)) {
4559 flush_workqueue(md_rdev_misc_wq);
4560 break;
4561 }
4562 rcu_read_unlock();
4563}
4564
NeilBrown6d7ff7382006-01-06 00:21:16 -08004565static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004566new_dev_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004567{
4568 /* buf must be %d:%d\n? giving major and minor numbers */
4569 /* The new device is added to the array.
4570 * If the array has a persistent superblock, we read the
4571 * superblock to initialise info and check validity.
4572 * Otherwise, only checking done is that in bind_rdev_to_array,
4573 * which mainly checks size.
4574 */
4575 char *e;
4576 int major = simple_strtoul(buf, &e, 10);
4577 int minor;
4578 dev_t dev;
NeilBrown3cb03002011-10-11 16:45:26 +11004579 struct md_rdev *rdev;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004580 int err;
4581
4582 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4583 return -EINVAL;
4584 minor = simple_strtoul(e+1, &e, 10);
4585 if (*e && *e != '\n')
4586 return -EINVAL;
4587 dev = MKDEV(major, minor);
4588 if (major != MAJOR(dev) ||
4589 minor != MINOR(dev))
4590 return -EOVERFLOW;
4591
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004592 flush_rdev_wq(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004593 err = mddev_lock(mddev);
4594 if (err)
4595 return err;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004596 if (mddev->persistent) {
4597 rdev = md_import_device(dev, mddev->major_version,
4598 mddev->minor_version);
4599 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11004600 struct md_rdev *rdev0
4601 = list_entry(mddev->disks.next,
4602 struct md_rdev, same_set);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004603 err = super_types[mddev->major_version]
4604 .load_super(rdev, rdev0, mddev->minor_version);
4605 if (err < 0)
4606 goto out;
4607 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08004608 } else if (mddev->external)
4609 rdev = md_import_device(dev, -2, -1);
4610 else
NeilBrown6d7ff7382006-01-06 00:21:16 -08004611 rdev = md_import_device(dev, -1, -1);
4612
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004613 if (IS_ERR(rdev)) {
4614 mddev_unlock(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004615 return PTR_ERR(rdev);
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004616 }
NeilBrown6d7ff7382006-01-06 00:21:16 -08004617 err = bind_rdev_to_array(rdev, mddev);
4618 out:
4619 if (err)
4620 export_rdev(rdev);
NeilBrown67918752014-12-15 12:57:01 +11004621 mddev_unlock(mddev);
Alexey Obitotskiy5492c462017-07-28 15:49:25 +02004622 if (!err)
4623 md_new_event(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004624 return err ? err : len;
4625}
4626
4627static struct md_sysfs_entry md_new_device =
NeilBrown80ca3a42006-07-10 04:44:18 -07004628__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
NeilBrown3b343802006-01-06 00:20:47 -08004629
4630static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004631bitmap_store(struct mddev *mddev, const char *buf, size_t len)
Paul Clements9b1d1da2006-10-03 01:15:49 -07004632{
4633 char *end;
4634 unsigned long chunk, end_chunk;
NeilBrown67918752014-12-15 12:57:01 +11004635 int err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004636
NeilBrown67918752014-12-15 12:57:01 +11004637 err = mddev_lock(mddev);
4638 if (err)
4639 return err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004640 if (!mddev->bitmap)
4641 goto out;
4642 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4643 while (*buf) {
4644 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4645 if (buf == end) break;
4646 if (*end == '-') { /* range */
4647 buf = end + 1;
4648 end_chunk = simple_strtoul(buf, &end, 0);
4649 if (buf == end) break;
4650 }
4651 if (*end && !isspace(*end)) break;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004652 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
André Goddard Rosae7d28602009-12-14 18:01:06 -08004653 buf = skip_spaces(end);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004654 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004655 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
Paul Clements9b1d1da2006-10-03 01:15:49 -07004656out:
NeilBrown67918752014-12-15 12:57:01 +11004657 mddev_unlock(mddev);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004658 return len;
4659}
4660
4661static struct md_sysfs_entry md_bitmap =
4662__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
4663
4664static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004665size_show(struct mddev *mddev, char *page)
NeilBrowna35b0d62006-01-06 00:20:49 -08004666{
Andre Noll58c0fed2009-03-31 14:33:13 +11004667 return sprintf(page, "%llu\n",
4668 (unsigned long long)mddev->dev_sectors / 2);
NeilBrowna35b0d62006-01-06 00:20:49 -08004669}
4670
NeilBrownfd01b882011-10-11 16:47:53 +11004671static int update_size(struct mddev *mddev, sector_t num_sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004672
4673static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004674size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna35b0d62006-01-06 00:20:49 -08004675{
4676 /* If array is inactive, we can reduce the component size, but
4677 * not increase it (except from 0).
4678 * If array is active, we can try an on-line resize
4679 */
Dan Williamsb522adc2009-03-31 15:00:31 +11004680 sector_t sectors;
4681 int err = strict_blocks_to_sectors(buf, &sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004682
Andre Noll58c0fed2009-03-31 14:33:13 +11004683 if (err < 0)
4684 return err;
NeilBrown67918752014-12-15 12:57:01 +11004685 err = mddev_lock(mddev);
4686 if (err)
4687 return err;
NeilBrowna35b0d62006-01-06 00:20:49 -08004688 if (mddev->pers) {
Andre Noll58c0fed2009-03-31 14:33:13 +11004689 err = update_size(mddev, sectors);
Xiao Ni4ba1e782016-06-12 17:18:00 +08004690 if (err == 0)
4691 md_update_sb(mddev, 1);
NeilBrowna35b0d62006-01-06 00:20:49 -08004692 } else {
Andre Noll58c0fed2009-03-31 14:33:13 +11004693 if (mddev->dev_sectors == 0 ||
4694 mddev->dev_sectors > sectors)
4695 mddev->dev_sectors = sectors;
NeilBrowna35b0d62006-01-06 00:20:49 -08004696 else
4697 err = -ENOSPC;
4698 }
NeilBrown67918752014-12-15 12:57:01 +11004699 mddev_unlock(mddev);
NeilBrowna35b0d62006-01-06 00:20:49 -08004700 return err ? err : len;
4701}
4702
4703static struct md_sysfs_entry md_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004704__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
NeilBrowna35b0d62006-01-06 00:20:49 -08004705
Masanari Iida83f0d772012-10-30 00:18:08 +09004706/* Metadata version.
NeilBrowne6910632008-02-06 01:39:51 -08004707 * This is one of
4708 * 'none' for arrays with no metadata (good luck...)
4709 * 'external' for arrays with externally managed metadata,
NeilBrown8bb93aa2006-01-06 00:20:50 -08004710 * or N.M for internally known formats
4711 */
4712static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004713metadata_show(struct mddev *mddev, char *page)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004714{
4715 if (mddev->persistent)
4716 return sprintf(page, "%d.%d\n",
4717 mddev->major_version, mddev->minor_version);
NeilBrowne6910632008-02-06 01:39:51 -08004718 else if (mddev->external)
4719 return sprintf(page, "external:%s\n", mddev->metadata_type);
NeilBrown8bb93aa2006-01-06 00:20:50 -08004720 else
4721 return sprintf(page, "none\n");
4722}
4723
4724static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004725metadata_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004726{
4727 int major, minor;
4728 char *e;
NeilBrown67918752014-12-15 12:57:01 +11004729 int err;
NeilBrownea43ddd2008-10-13 11:55:11 +11004730 /* Changing the details of 'external' metadata is
4731 * always permitted. Otherwise there must be
4732 * no devices attached to the array.
4733 */
NeilBrown67918752014-12-15 12:57:01 +11004734
4735 err = mddev_lock(mddev);
4736 if (err)
4737 return err;
4738 err = -EBUSY;
NeilBrownea43ddd2008-10-13 11:55:11 +11004739 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4740 ;
4741 else if (!list_empty(&mddev->disks))
NeilBrown67918752014-12-15 12:57:01 +11004742 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004743
NeilBrown67918752014-12-15 12:57:01 +11004744 err = 0;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004745 if (cmd_match(buf, "none")) {
4746 mddev->persistent = 0;
NeilBrowne6910632008-02-06 01:39:51 -08004747 mddev->external = 0;
4748 mddev->major_version = 0;
4749 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004750 goto out_unlock;
NeilBrowne6910632008-02-06 01:39:51 -08004751 }
4752 if (strncmp(buf, "external:", 9) == 0) {
NeilBrown20a49ff2008-02-06 01:39:57 -08004753 size_t namelen = len-9;
NeilBrowne6910632008-02-06 01:39:51 -08004754 if (namelen >= sizeof(mddev->metadata_type))
4755 namelen = sizeof(mddev->metadata_type)-1;
4756 strncpy(mddev->metadata_type, buf+9, namelen);
4757 mddev->metadata_type[namelen] = 0;
4758 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4759 mddev->metadata_type[--namelen] = 0;
4760 mddev->persistent = 0;
4761 mddev->external = 1;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004762 mddev->major_version = 0;
4763 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004764 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004765 }
4766 major = simple_strtoul(buf, &e, 10);
NeilBrown67918752014-12-15 12:57:01 +11004767 err = -EINVAL;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004768 if (e==buf || *e != '.')
NeilBrown67918752014-12-15 12:57:01 +11004769 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004770 buf = e+1;
4771 minor = simple_strtoul(buf, &e, 10);
NeilBrown3f9d7b02006-12-22 01:11:41 -08004772 if (e==buf || (*e && *e != '\n') )
NeilBrown67918752014-12-15 12:57:01 +11004773 goto out_unlock;
4774 err = -ENOENT;
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07004775 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004776 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004777 mddev->major_version = major;
4778 mddev->minor_version = minor;
4779 mddev->persistent = 1;
NeilBrowne6910632008-02-06 01:39:51 -08004780 mddev->external = 0;
NeilBrown67918752014-12-15 12:57:01 +11004781 err = 0;
4782out_unlock:
4783 mddev_unlock(mddev);
4784 return err ?: len;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004785}
4786
4787static struct md_sysfs_entry md_metadata =
NeilBrown750f1992014-09-30 08:53:05 +10004788__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
NeilBrown8bb93aa2006-01-06 00:20:50 -08004789
NeilBrowna35b0d62006-01-06 00:20:49 -08004790static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004791action_show(struct mddev *mddev, char *page)
NeilBrown24dd4692005-11-08 21:39:26 -08004792{
NeilBrown7eec3142005-11-08 21:39:44 -08004793 char *type = "idle";
NeilBrownb7b17c92014-12-15 12:56:59 +11004794 unsigned long recovery = mddev->recovery;
4795 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
NeilBrownb6a9ce62009-05-26 09:41:17 +10004796 type = "frozen";
NeilBrownb7b17c92014-12-15 12:56:59 +11004797 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4798 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4799 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
NeilBrownccfcc3c2006-03-27 01:18:09 -08004800 type = "reshape";
NeilBrownb7b17c92014-12-15 12:56:59 +11004801 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4802 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004803 type = "resync";
NeilBrownb7b17c92014-12-15 12:56:59 +11004804 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004805 type = "check";
4806 else
4807 type = "repair";
NeilBrownb7b17c92014-12-15 12:56:59 +11004808 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004809 type = "recover";
NeilBrown985ca972015-07-06 12:26:57 +10004810 else if (mddev->reshape_position != MaxSector)
4811 type = "reshape";
NeilBrown24dd4692005-11-08 21:39:26 -08004812 }
4813 return sprintf(page, "%s\n", type);
4814}
4815
4816static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004817action_store(struct mddev *mddev, const char *page, size_t len)
NeilBrown24dd4692005-11-08 21:39:26 -08004818{
NeilBrown7eec3142005-11-08 21:39:44 -08004819 if (!mddev->pers || !mddev->pers->sync_request)
4820 return -EINVAL;
4821
NeilBrownb6a9ce62009-05-26 09:41:17 +10004822
4823 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004824 if (cmd_match(page, "frozen"))
4825 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4826 else
4827 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown8e8e2512015-06-12 19:51:27 +10004828 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4829 mddev_lock(mddev) == 0) {
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004830 if (work_pending(&mddev->del_work))
4831 flush_workqueue(md_misc_wq);
NeilBrown8e8e2512015-06-12 19:51:27 +10004832 if (mddev->sync_thread) {
4833 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown67918752014-12-15 12:57:01 +11004834 md_reap_sync_thread(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004835 }
NeilBrown8e8e2512015-06-12 19:51:27 +10004836 mddev_unlock(mddev);
NeilBrown7eec3142005-11-08 21:39:44 -08004837 }
NeilBrown312045e2015-12-21 11:01:21 +11004838 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004839 return -EBUSY;
Neil Brown72a23c22008-06-28 08:31:41 +10004840 else if (cmd_match(page, "resync"))
NeilBrown56ccc112015-05-28 17:53:29 +10004841 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004842 else if (cmd_match(page, "recover")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004843 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004844 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004845 } else if (cmd_match(page, "reshape")) {
NeilBrown16484bf2006-03-27 01:18:13 -08004846 int err;
4847 if (mddev->pers->start_reshape == NULL)
4848 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11004849 err = mddev_lock(mddev);
4850 if (!err) {
NeilBrown312045e2015-12-21 11:01:21 +11004851 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4852 err = -EBUSY;
4853 else {
4854 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4855 err = mddev->pers->start_reshape(mddev);
4856 }
NeilBrown67918752014-12-15 12:57:01 +11004857 mddev_unlock(mddev);
4858 }
NeilBrown16484bf2006-03-27 01:18:13 -08004859 if (err)
4860 return err;
Junxiao Bie1a86db2020-07-14 16:10:26 -07004861 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrown16484bf2006-03-27 01:18:13 -08004862 } else {
NeilBrownbce74da2006-01-06 00:20:41 -08004863 if (cmd_match(page, "check"))
NeilBrown7eec3142005-11-08 21:39:44 -08004864 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
NeilBrown2adc7d42006-05-20 14:59:57 -07004865 else if (!cmd_match(page, "repair"))
NeilBrown7eec3142005-11-08 21:39:44 -08004866 return -EINVAL;
NeilBrown56ccc112015-05-28 17:53:29 +10004867 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004868 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4869 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004870 }
NeilBrown48c26dd2012-10-11 14:19:39 +11004871 if (mddev->ro == 2) {
4872 /* A write to sync_action is enough to justify
4873 * canceling read-auto mode
4874 */
4875 mddev->ro = 0;
4876 md_wakeup_thread(mddev->sync_thread);
4877 }
NeilBrown03c902e2006-01-06 00:20:46 -08004878 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08004879 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10004880 sysfs_notify_dirent_safe(mddev->sysfs_action);
NeilBrown24dd4692005-11-08 21:39:26 -08004881 return len;
4882}
4883
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004884static struct md_sysfs_entry md_scan_mode =
NeilBrown750f1992014-09-30 08:53:05 +10004885__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004886
4887static ssize_t
4888last_sync_action_show(struct mddev *mddev, char *page)
4889{
4890 return sprintf(page, "%s\n", mddev->last_sync_action);
4891}
4892
4893static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4894
NeilBrown9d888832005-11-08 21:39:26 -08004895static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004896mismatch_cnt_show(struct mddev *mddev, char *page)
NeilBrown9d888832005-11-08 21:39:26 -08004897{
4898 return sprintf(page, "%llu\n",
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11004899 (unsigned long long)
4900 atomic64_read(&mddev->resync_mismatches));
NeilBrown9d888832005-11-08 21:39:26 -08004901}
4902
NeilBrown80ca3a42006-07-10 04:44:18 -07004903static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
NeilBrown9d888832005-11-08 21:39:26 -08004904
NeilBrown88202a02006-01-06 00:21:36 -08004905static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004906sync_min_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004907{
4908 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4909 mddev->sync_speed_min ? "local": "system");
4910}
4911
4912static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004913sync_min_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004914{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004915 unsigned int min;
4916 int rv;
4917
NeilBrown88202a02006-01-06 00:21:36 -08004918 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004919 min = 0;
4920 } else {
4921 rv = kstrtouint(buf, 10, &min);
4922 if (rv < 0)
4923 return rv;
4924 if (min == 0)
4925 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004926 }
NeilBrown88202a02006-01-06 00:21:36 -08004927 mddev->sync_speed_min = min;
4928 return len;
4929}
4930
4931static struct md_sysfs_entry md_sync_min =
4932__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4933
4934static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004935sync_max_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004936{
4937 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4938 mddev->sync_speed_max ? "local": "system");
4939}
4940
4941static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004942sync_max_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004943{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004944 unsigned int max;
4945 int rv;
4946
NeilBrown88202a02006-01-06 00:21:36 -08004947 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004948 max = 0;
4949 } else {
4950 rv = kstrtouint(buf, 10, &max);
4951 if (rv < 0)
4952 return rv;
4953 if (max == 0)
4954 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004955 }
NeilBrown88202a02006-01-06 00:21:36 -08004956 mddev->sync_speed_max = max;
4957 return len;
4958}
4959
4960static struct md_sysfs_entry md_sync_max =
4961__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
4962
Iustin Popd7f3d292007-10-16 23:30:54 -07004963static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004964degraded_show(struct mddev *mddev, char *page)
Iustin Popd7f3d292007-10-16 23:30:54 -07004965{
4966 return sprintf(page, "%d\n", mddev->degraded);
4967}
4968static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
NeilBrown88202a02006-01-06 00:21:36 -08004969
4970static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004971sync_force_parallel_show(struct mddev *mddev, char *page)
Bernd Schubert90b08712008-05-23 13:04:38 -07004972{
4973 return sprintf(page, "%d\n", mddev->parallel_resync);
4974}
4975
4976static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004977sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
Bernd Schubert90b08712008-05-23 13:04:38 -07004978{
4979 long n;
4980
Jingoo Hanb29bebd2013-06-01 16:15:16 +09004981 if (kstrtol(buf, 10, &n))
Bernd Schubert90b08712008-05-23 13:04:38 -07004982 return -EINVAL;
4983
4984 if (n != 0 && n != 1)
4985 return -EINVAL;
4986
4987 mddev->parallel_resync = n;
4988
4989 if (mddev->sync_thread)
4990 wake_up(&resync_wait);
4991
4992 return len;
4993}
4994
4995/* force parallel resync, even with shared block devices */
4996static struct md_sysfs_entry md_sync_force_parallel =
4997__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4998 sync_force_parallel_show, sync_force_parallel_store);
4999
5000static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005001sync_speed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005002{
5003 unsigned long resync, dt, db;
NeilBrownd1a7c502009-03-31 15:24:32 +11005004 if (mddev->curr_resync == 0)
5005 return sprintf(page, "none\n");
Andre Noll9687a602008-03-25 22:24:09 +01005006 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5007 dt = (jiffies - mddev->resync_mark) / HZ;
NeilBrown88202a02006-01-06 00:21:36 -08005008 if (!dt) dt++;
Andre Noll9687a602008-03-25 22:24:09 +01005009 db = resync - mddev->resync_mark_cnt;
5010 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
NeilBrown88202a02006-01-06 00:21:36 -08005011}
5012
NeilBrown80ca3a42006-07-10 04:44:18 -07005013static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
NeilBrown88202a02006-01-06 00:21:36 -08005014
5015static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005016sync_completed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005017{
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005018 unsigned long long max_sectors, resync;
NeilBrown88202a02006-01-06 00:21:36 -08005019
NeilBrownacb180b2009-04-14 16:28:34 +10005020 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5021 return sprintf(page, "none\n");
5022
NeilBrown72f36d52012-10-11 14:25:57 +11005023 if (mddev->curr_resync == 1 ||
5024 mddev->curr_resync == 2)
5025 return sprintf(page, "delayed\n");
5026
NeilBrownc804cde2012-05-21 09:28:33 +10005027 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5028 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Andre Noll58c0fed2009-03-31 14:33:13 +11005029 max_sectors = mddev->resync_max_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005030 else
Andre Noll58c0fed2009-03-31 14:33:13 +11005031 max_sectors = mddev->dev_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005032
NeilBrownacb180b2009-04-14 16:28:34 +10005033 resync = mddev->curr_resync_completed;
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005034 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
NeilBrown88202a02006-01-06 00:21:36 -08005035}
5036
NeilBrown750f1992014-09-30 08:53:05 +10005037static struct md_sysfs_entry md_sync_completed =
5038 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
NeilBrown88202a02006-01-06 00:21:36 -08005039
NeilBrowne464eaf2006-03-27 01:18:14 -08005040static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005041min_sync_show(struct mddev *mddev, char *page)
Neil Brown5e96ee62008-06-28 08:31:24 +10005042{
5043 return sprintf(page, "%llu\n",
5044 (unsigned long long)mddev->resync_min);
5045}
5046static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005047min_sync_store(struct mddev *mddev, const char *buf, size_t len)
Neil Brown5e96ee62008-06-28 08:31:24 +10005048{
5049 unsigned long long min;
NeilBrown23da4222014-12-15 12:57:01 +11005050 int err;
NeilBrown23da4222014-12-15 12:57:01 +11005051
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005052 if (kstrtoull(buf, 10, &min))
Neil Brown5e96ee62008-06-28 08:31:24 +10005053 return -EINVAL;
NeilBrown23da4222014-12-15 12:57:01 +11005054
5055 spin_lock(&mddev->lock);
5056 err = -EINVAL;
Neil Brown5e96ee62008-06-28 08:31:24 +10005057 if (min > mddev->resync_max)
NeilBrown23da4222014-12-15 12:57:01 +11005058 goto out_unlock;
5059
5060 err = -EBUSY;
Neil Brown5e96ee62008-06-28 08:31:24 +10005061 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005062 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005063
NeilBrown50c37b12015-03-23 17:36:38 +11005064 /* Round down to multiple of 4K for safety */
5065 mddev->resync_min = round_down(min, 8);
NeilBrown23da4222014-12-15 12:57:01 +11005066 err = 0;
Neil Brown5e96ee62008-06-28 08:31:24 +10005067
NeilBrown23da4222014-12-15 12:57:01 +11005068out_unlock:
5069 spin_unlock(&mddev->lock);
5070 return err ?: len;
Neil Brown5e96ee62008-06-28 08:31:24 +10005071}
5072
5073static struct md_sysfs_entry md_min_sync =
5074__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5075
5076static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005077max_sync_show(struct mddev *mddev, char *page)
NeilBrownc6207272008-02-06 01:39:52 -08005078{
5079 if (mddev->resync_max == MaxSector)
5080 return sprintf(page, "max\n");
5081 else
5082 return sprintf(page, "%llu\n",
5083 (unsigned long long)mddev->resync_max);
5084}
5085static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005086max_sync_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownc6207272008-02-06 01:39:52 -08005087{
NeilBrown23da4222014-12-15 12:57:01 +11005088 int err;
5089 spin_lock(&mddev->lock);
NeilBrownc6207272008-02-06 01:39:52 -08005090 if (strncmp(buf, "max", 3) == 0)
5091 mddev->resync_max = MaxSector;
5092 else {
Neil Brown5e96ee62008-06-28 08:31:24 +10005093 unsigned long long max;
NeilBrown23da4222014-12-15 12:57:01 +11005094 int chunk;
5095
5096 err = -EINVAL;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005097 if (kstrtoull(buf, 10, &max))
NeilBrown23da4222014-12-15 12:57:01 +11005098 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005099 if (max < mddev->resync_min)
NeilBrown23da4222014-12-15 12:57:01 +11005100 goto out_unlock;
5101
5102 err = -EBUSY;
NeilBrownc6207272008-02-06 01:39:52 -08005103 if (max < mddev->resync_max &&
NeilBrown4d484a42009-08-13 10:41:50 +10005104 mddev->ro == 0 &&
NeilBrownc6207272008-02-06 01:39:52 -08005105 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005106 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005107
5108 /* Must be a multiple of chunk_size */
NeilBrown23da4222014-12-15 12:57:01 +11005109 chunk = mddev->chunk_sectors;
5110 if (chunk) {
raz ben yehuda2ac06c32009-06-16 17:01:42 +10005111 sector_t temp = max;
NeilBrown23da4222014-12-15 12:57:01 +11005112
5113 err = -EINVAL;
5114 if (sector_div(temp, chunk))
5115 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005116 }
5117 mddev->resync_max = max;
5118 }
5119 wake_up(&mddev->recovery_wait);
NeilBrown23da4222014-12-15 12:57:01 +11005120 err = 0;
5121out_unlock:
5122 spin_unlock(&mddev->lock);
5123 return err ?: len;
NeilBrownc6207272008-02-06 01:39:52 -08005124}
5125
5126static struct md_sysfs_entry md_max_sync =
5127__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
5128
5129static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005130suspend_lo_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005131{
5132 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5133}
5134
5135static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005136suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005137{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005138 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005139 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005140
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005141 err = kstrtoull(buf, 10, &new);
5142 if (err < 0)
5143 return err;
5144 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005145 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005146
NeilBrown67918752014-12-15 12:57:01 +11005147 err = mddev_lock(mddev);
5148 if (err)
5149 return err;
5150 err = -EINVAL;
5151 if (mddev->pers == NULL ||
5152 mddev->pers->quiesce == NULL)
5153 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005154 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005155 mddev->suspend_lo = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005156 mddev_resume(mddev);
5157
NeilBrown67918752014-12-15 12:57:01 +11005158 err = 0;
5159unlock:
5160 mddev_unlock(mddev);
5161 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005162}
5163static struct md_sysfs_entry md_suspend_lo =
5164__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5165
NeilBrowne464eaf2006-03-27 01:18:14 -08005166static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005167suspend_hi_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005168{
5169 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5170}
5171
5172static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005173suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005174{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005175 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005176 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005177
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005178 err = kstrtoull(buf, 10, &new);
5179 if (err < 0)
5180 return err;
5181 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005182 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005183
NeilBrown67918752014-12-15 12:57:01 +11005184 err = mddev_lock(mddev);
5185 if (err)
5186 return err;
5187 err = -EINVAL;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005188 if (mddev->pers == NULL)
NeilBrown67918752014-12-15 12:57:01 +11005189 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005190
5191 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005192 mddev->suspend_hi = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005193 mddev_resume(mddev);
5194
NeilBrown67918752014-12-15 12:57:01 +11005195 err = 0;
5196unlock:
5197 mddev_unlock(mddev);
5198 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005199}
5200static struct md_sysfs_entry md_suspend_hi =
5201__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5202
NeilBrown08a02ec2007-05-09 02:35:38 -07005203static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005204reshape_position_show(struct mddev *mddev, char *page)
NeilBrown08a02ec2007-05-09 02:35:38 -07005205{
5206 if (mddev->reshape_position != MaxSector)
5207 return sprintf(page, "%llu\n",
5208 (unsigned long long)mddev->reshape_position);
5209 strcpy(page, "none\n");
5210 return 5;
5211}
5212
5213static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005214reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown08a02ec2007-05-09 02:35:38 -07005215{
NeilBrownc6563a82012-05-21 09:27:00 +10005216 struct md_rdev *rdev;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005217 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005218 int err;
NeilBrown67918752014-12-15 12:57:01 +11005219
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005220 err = kstrtoull(buf, 10, &new);
5221 if (err < 0)
5222 return err;
5223 if (new != (sector_t)new)
NeilBrown08a02ec2007-05-09 02:35:38 -07005224 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11005225 err = mddev_lock(mddev);
5226 if (err)
5227 return err;
5228 err = -EBUSY;
5229 if (mddev->pers)
5230 goto unlock;
NeilBrown08a02ec2007-05-09 02:35:38 -07005231 mddev->reshape_position = new;
5232 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10005233 mddev->reshape_backwards = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07005234 mddev->new_level = mddev->level;
5235 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10005236 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10005237 rdev_for_each(rdev, mddev)
5238 rdev->new_data_offset = rdev->data_offset;
NeilBrown67918752014-12-15 12:57:01 +11005239 err = 0;
5240unlock:
5241 mddev_unlock(mddev);
5242 return err ?: len;
NeilBrown08a02ec2007-05-09 02:35:38 -07005243}
5244
5245static struct md_sysfs_entry md_reshape_position =
5246__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5247 reshape_position_store);
5248
Dan Williamsb522adc2009-03-31 15:00:31 +11005249static ssize_t
NeilBrown2c810cd2012-05-21 09:27:00 +10005250reshape_direction_show(struct mddev *mddev, char *page)
5251{
5252 return sprintf(page, "%s\n",
5253 mddev->reshape_backwards ? "backwards" : "forwards");
5254}
5255
5256static ssize_t
5257reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5258{
5259 int backwards = 0;
NeilBrown67918752014-12-15 12:57:01 +11005260 int err;
5261
NeilBrown2c810cd2012-05-21 09:27:00 +10005262 if (cmd_match(buf, "forwards"))
5263 backwards = 0;
5264 else if (cmd_match(buf, "backwards"))
5265 backwards = 1;
5266 else
5267 return -EINVAL;
5268 if (mddev->reshape_backwards == backwards)
5269 return len;
5270
NeilBrown67918752014-12-15 12:57:01 +11005271 err = mddev_lock(mddev);
5272 if (err)
5273 return err;
NeilBrown2c810cd2012-05-21 09:27:00 +10005274 /* check if we are allowed to change */
5275 if (mddev->delta_disks)
NeilBrown67918752014-12-15 12:57:01 +11005276 err = -EBUSY;
5277 else if (mddev->persistent &&
NeilBrown2c810cd2012-05-21 09:27:00 +10005278 mddev->major_version == 0)
NeilBrown67918752014-12-15 12:57:01 +11005279 err = -EINVAL;
5280 else
5281 mddev->reshape_backwards = backwards;
5282 mddev_unlock(mddev);
5283 return err ?: len;
NeilBrown2c810cd2012-05-21 09:27:00 +10005284}
5285
5286static struct md_sysfs_entry md_reshape_direction =
5287__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5288 reshape_direction_store);
5289
5290static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005291array_size_show(struct mddev *mddev, char *page)
Dan Williamsb522adc2009-03-31 15:00:31 +11005292{
5293 if (mddev->external_size)
5294 return sprintf(page, "%llu\n",
5295 (unsigned long long)mddev->array_sectors/2);
5296 else
5297 return sprintf(page, "default\n");
5298}
5299
5300static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005301array_size_store(struct mddev *mddev, const char *buf, size_t len)
Dan Williamsb522adc2009-03-31 15:00:31 +11005302{
5303 sector_t sectors;
NeilBrown67918752014-12-15 12:57:01 +11005304 int err;
5305
5306 err = mddev_lock(mddev);
5307 if (err)
5308 return err;
Dan Williamsb522adc2009-03-31 15:00:31 +11005309
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005310 /* cluster raid doesn't support change array_sectors */
Zhilong Liub6708832017-04-10 14:15:55 +08005311 if (mddev_is_clustered(mddev)) {
5312 mddev_unlock(mddev);
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005313 return -EINVAL;
Zhilong Liub6708832017-04-10 14:15:55 +08005314 }
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005315
Dan Williamsb522adc2009-03-31 15:00:31 +11005316 if (strncmp(buf, "default", 7) == 0) {
5317 if (mddev->pers)
5318 sectors = mddev->pers->size(mddev, 0, 0);
5319 else
5320 sectors = mddev->array_sectors;
5321
5322 mddev->external_size = 0;
5323 } else {
5324 if (strict_blocks_to_sectors(buf, &sectors) < 0)
NeilBrown67918752014-12-15 12:57:01 +11005325 err = -EINVAL;
5326 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5327 err = -E2BIG;
5328 else
5329 mddev->external_size = 1;
Dan Williamsb522adc2009-03-31 15:00:31 +11005330 }
5331
NeilBrown67918752014-12-15 12:57:01 +11005332 if (!err) {
5333 mddev->array_sectors = sectors;
Christoph Hellwig2c247c52020-11-16 15:57:11 +01005334 if (mddev->pers)
5335 set_capacity_and_notify(mddev->gendisk,
5336 mddev->array_sectors);
NeilBrowncbe6ef12011-02-16 13:58:38 +11005337 }
NeilBrown67918752014-12-15 12:57:01 +11005338 mddev_unlock(mddev);
5339 return err ?: len;
Dan Williamsb522adc2009-03-31 15:00:31 +11005340}
5341
5342static struct md_sysfs_entry md_array_size =
5343__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5344 array_size_store);
NeilBrowne464eaf2006-03-27 01:18:14 -08005345
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005346static ssize_t
5347consistency_policy_show(struct mddev *mddev, char *page)
5348{
5349 int ret;
5350
5351 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5352 ret = sprintf(page, "journal\n");
5353 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5354 ret = sprintf(page, "ppl\n");
5355 } else if (mddev->bitmap) {
5356 ret = sprintf(page, "bitmap\n");
5357 } else if (mddev->pers) {
5358 if (mddev->pers->sync_request)
5359 ret = sprintf(page, "resync\n");
5360 else
5361 ret = sprintf(page, "none\n");
5362 } else {
5363 ret = sprintf(page, "unknown\n");
5364 }
5365
5366 return ret;
5367}
5368
5369static ssize_t
5370consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5371{
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005372 int err = 0;
5373
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005374 if (mddev->pers) {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005375 if (mddev->pers->change_consistency_policy)
5376 err = mddev->pers->change_consistency_policy(mddev, buf);
5377 else
5378 err = -EBUSY;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005379 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5380 set_bit(MD_HAS_PPL, &mddev->flags);
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005381 } else {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005382 err = -EINVAL;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005383 }
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005384
5385 return err ? err : len;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005386}
5387
5388static struct md_sysfs_entry md_consistency_policy =
5389__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5390 consistency_policy_store);
5391
Guoqing Jiang9a567842019-07-24 11:09:19 +02005392static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5393{
5394 return sprintf(page, "%d\n", mddev->fail_last_dev);
5395}
5396
5397/*
5398 * Setting fail_last_dev to true to allow last device to be forcibly removed
5399 * from RAID1/RAID10.
5400 */
5401static ssize_t
5402fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5403{
5404 int ret;
5405 bool value;
5406
5407 ret = kstrtobool(buf, &value);
5408 if (ret)
5409 return ret;
5410
5411 if (value != mddev->fail_last_dev)
5412 mddev->fail_last_dev = value;
5413
5414 return len;
5415}
5416static struct md_sysfs_entry md_fail_last_dev =
5417__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5418 fail_last_dev_store);
5419
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005420static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5421{
5422 if (mddev->pers == NULL || (mddev->pers->level != 1))
5423 return sprintf(page, "n/a\n");
5424 else
5425 return sprintf(page, "%d\n", mddev->serialize_policy);
5426}
5427
5428/*
5429 * Setting serialize_policy to true to enforce write IO is not reordered
5430 * for raid1.
5431 */
5432static ssize_t
5433serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5434{
5435 int err;
5436 bool value;
5437
5438 err = kstrtobool(buf, &value);
5439 if (err)
5440 return err;
5441
5442 if (value == mddev->serialize_policy)
5443 return len;
5444
5445 err = mddev_lock(mddev);
5446 if (err)
5447 return err;
5448 if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5449 pr_err("md: serialize_policy is only effective for raid1\n");
5450 err = -EINVAL;
5451 goto unlock;
5452 }
5453
5454 mddev_suspend(mddev);
5455 if (value)
5456 mddev_create_serial_pool(mddev, NULL, true);
5457 else
5458 mddev_destroy_serial_pool(mddev, NULL, true);
5459 mddev->serialize_policy = value;
5460 mddev_resume(mddev);
5461unlock:
5462 mddev_unlock(mddev);
5463 return err ?: len;
5464}
5465
5466static struct md_sysfs_entry md_serialize_policy =
5467__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5468 serialize_policy_store);
5469
5470
NeilBrowneae17012005-11-08 21:39:23 -08005471static struct attribute *md_default_attrs[] = {
5472 &md_level.attr,
NeilBrownd4dbd022006-06-26 00:27:59 -07005473 &md_layout.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005474 &md_raid_disks.attr,
Sebastian Parschauerec164d072020-07-28 12:01:39 +02005475 &md_uuid.attr,
NeilBrown3b343802006-01-06 00:20:47 -08005476 &md_chunk_size.attr,
NeilBrowna35b0d62006-01-06 00:20:49 -08005477 &md_size.attr,
NeilBrowna94213b2006-06-26 00:28:00 -07005478 &md_resync_start.attr,
NeilBrown8bb93aa2006-01-06 00:20:50 -08005479 &md_metadata.attr,
NeilBrown6d7ff7382006-01-06 00:21:16 -08005480 &md_new_device.attr,
NeilBrown16f17b32006-06-26 00:27:37 -07005481 &md_safe_delay.attr,
NeilBrown9e653b62006-06-26 00:27:58 -07005482 &md_array_state.attr,
NeilBrown08a02ec2007-05-09 02:35:38 -07005483 &md_reshape_position.attr,
NeilBrown2c810cd2012-05-21 09:27:00 +10005484 &md_reshape_direction.attr,
Dan Williamsb522adc2009-03-31 15:00:31 +11005485 &md_array_size.attr,
Robert Becker1e509152009-12-14 12:49:58 +11005486 &max_corr_read_errors.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005487 &md_consistency_policy.attr,
Guoqing Jiang9a567842019-07-24 11:09:19 +02005488 &md_fail_last_dev.attr,
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005489 &md_serialize_policy.attr,
NeilBrown411036f2005-11-08 21:39:40 -08005490 NULL,
5491};
5492
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005493static const struct attribute_group md_default_group = {
5494 .attrs = md_default_attrs,
5495};
5496
NeilBrown411036f2005-11-08 21:39:40 -08005497static struct attribute *md_redundancy_attrs[] = {
NeilBrown24dd4692005-11-08 21:39:26 -08005498 &md_scan_mode.attr,
Jonathan Brassowc4a39552013-06-25 01:23:59 -05005499 &md_last_scan_mode.attr,
NeilBrown9d888832005-11-08 21:39:26 -08005500 &md_mismatches.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005501 &md_sync_min.attr,
5502 &md_sync_max.attr,
5503 &md_sync_speed.attr,
Bernd Schubert90b08712008-05-23 13:04:38 -07005504 &md_sync_force_parallel.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005505 &md_sync_completed.attr,
Neil Brown5e96ee62008-06-28 08:31:24 +10005506 &md_min_sync.attr,
NeilBrownc6207272008-02-06 01:39:52 -08005507 &md_max_sync.attr,
NeilBrowne464eaf2006-03-27 01:18:14 -08005508 &md_suspend_lo.attr,
5509 &md_suspend_hi.attr,
Paul Clements9b1d1da2006-10-03 01:15:49 -07005510 &md_bitmap.attr,
Iustin Popd7f3d292007-10-16 23:30:54 -07005511 &md_degraded.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005512 NULL,
5513};
Rikard Falkebornc32dc042021-05-29 12:30:49 +02005514static const struct attribute_group md_redundancy_group = {
NeilBrown411036f2005-11-08 21:39:40 -08005515 .name = NULL,
5516 .attrs = md_redundancy_attrs,
5517};
5518
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005519static const struct attribute_group *md_attr_groups[] = {
5520 &md_default_group,
5521 &md_bitmap_group,
5522 NULL,
5523};
5524
NeilBrowneae17012005-11-08 21:39:23 -08005525static ssize_t
5526md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5527{
5528 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005529 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005530 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005531
5532 if (!entry->show)
5533 return -EIO;
NeilBrownaf8a2432011-12-08 15:49:46 +11005534 spin_lock(&all_mddevs_lock);
5535 if (list_empty(&mddev->all_mddevs)) {
5536 spin_unlock(&all_mddevs_lock);
5537 return -EBUSY;
5538 }
5539 mddev_get(mddev);
5540 spin_unlock(&all_mddevs_lock);
5541
NeilBrownb7b17c92014-12-15 12:56:59 +11005542 rv = entry->show(mddev, page);
NeilBrownaf8a2432011-12-08 15:49:46 +11005543 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005544 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005545}
5546
5547static ssize_t
5548md_attr_store(struct kobject *kobj, struct attribute *attr,
5549 const char *page, size_t length)
5550{
5551 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005552 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005553 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005554
5555 if (!entry->store)
5556 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07005557 if (!capable(CAP_SYS_ADMIN))
5558 return -EACCES;
NeilBrownaf8a2432011-12-08 15:49:46 +11005559 spin_lock(&all_mddevs_lock);
5560 if (list_empty(&mddev->all_mddevs)) {
5561 spin_unlock(&all_mddevs_lock);
5562 return -EBUSY;
5563 }
5564 mddev_get(mddev);
5565 spin_unlock(&all_mddevs_lock);
NeilBrown67918752014-12-15 12:57:01 +11005566 rv = entry->store(mddev, page, length);
NeilBrownaf8a2432011-12-08 15:49:46 +11005567 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005568 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005569}
5570
5571static void md_free(struct kobject *ko)
5572{
NeilBrownfd01b882011-10-11 16:47:53 +11005573 struct mddev *mddev = container_of(ko, struct mddev, kobj);
NeilBrowna21d1502009-01-09 08:31:09 +11005574
5575 if (mddev->sysfs_state)
5576 sysfs_put(mddev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07005577 if (mddev->sysfs_level)
5578 sysfs_put(mddev->sysfs_level);
5579
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005580 if (mddev->gendisk) {
Bart Van Assched8115c352018-02-28 10:15:29 -08005581 del_gendisk(mddev->gendisk);
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005582 blk_cleanup_disk(mddev->gendisk);
5583 }
NeilBrown4ad23a972017-03-15 14:05:14 +11005584 percpu_ref_exit(&mddev->writes_pending);
NeilBrowna21d1502009-01-09 08:31:09 +11005585
Kent Overstreet28dec872018-06-07 20:52:54 -04005586 bioset_exit(&mddev->bio_set);
5587 bioset_exit(&mddev->sync_set);
Guoqing Jiangdaee2022021-06-03 17:21:06 +08005588 if (mddev->level != 1 && mddev->level != 10)
5589 bioset_exit(&mddev->io_acct_set);
NeilBrowneae17012005-11-08 21:39:23 -08005590 kfree(mddev);
5591}
5592
Emese Revfy52cf25d2010-01-19 02:58:23 +01005593static const struct sysfs_ops md_sysfs_ops = {
NeilBrowneae17012005-11-08 21:39:23 -08005594 .show = md_attr_show,
5595 .store = md_attr_store,
5596};
5597static struct kobj_type md_ktype = {
5598 .release = md_free,
5599 .sysfs_ops = &md_sysfs_ops,
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005600 .default_groups = md_attr_groups,
NeilBrowneae17012005-11-08 21:39:23 -08005601};
5602
Linus Torvalds1da177e2005-04-16 15:20:36 -07005603int mdp_major = 0;
5604
Dan Williams5fd3a172009-03-04 00:57:25 -07005605static void mddev_delayed_delete(struct work_struct *ws)
5606{
NeilBrownfd01b882011-10-11 16:47:53 +11005607 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Dan Williams5fd3a172009-03-04 00:57:25 -07005608
Dan Williams5fd3a172009-03-04 00:57:25 -07005609 kobject_del(&mddev->kobj);
5610 kobject_put(&mddev->kobj);
5611}
5612
NeilBrown4ad23a972017-03-15 14:05:14 +11005613static void no_op(struct percpu_ref *r) {}
5614
NeilBrowna415c0f2017-06-05 16:05:13 +10005615int mddev_init_writes_pending(struct mddev *mddev)
5616{
5617 if (mddev->writes_pending.percpu_count_ptr)
5618 return 0;
Roman Gushchinddde2af2019-05-07 10:01:49 -07005619 if (percpu_ref_init(&mddev->writes_pending, no_op,
5620 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
NeilBrowna415c0f2017-06-05 16:05:13 +10005621 return -ENOMEM;
5622 /* We want to start with the refcount at zero */
5623 percpu_ref_put(&mddev->writes_pending);
5624 return 0;
5625}
5626EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5627
NeilBrownefeb53c2009-01-09 08:31:10 +11005628static int md_alloc(dev_t dev, char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629{
NeilBrown039b7222017-04-12 16:26:13 +10005630 /*
5631 * If dev is zero, name is the name of a device to allocate with
5632 * an arbitrary minor number. It will be "md_???"
5633 * If dev is non-zero it must be a device number with a MAJOR of
5634 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5635 * the device is being created by opening a node in /dev.
5636 * If "name" is not NULL, the device is being created by
5637 * writing to /sys/module/md_mod/parameters/new_array.
5638 */
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005639 static DEFINE_MUTEX(disks_mutex);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005640 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005641 struct gendisk *disk;
NeilBrownefeb53c2009-01-09 08:31:10 +11005642 int partitioned;
5643 int shift;
5644 int unit;
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005645 int error ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005647 /*
5648 * Wait for any previous instance of this device to be completely
5649 * removed (mddev_delayed_delete).
NeilBrownd3374822009-01-09 08:31:10 +11005650 */
Tejun Heoe804ac72010-10-15 15:36:08 +02005651 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11005652
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005653 mutex_lock(&disks_mutex);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005654 mddev = mddev_alloc(dev);
5655 if (IS_ERR(mddev)) {
5656 mutex_unlock(&disks_mutex);
5657 return PTR_ERR(mddev);
5658 }
5659
5660 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5661 shift = partitioned ? MdpMinorShift : 0;
5662 unit = MINOR(mddev->unit) >> shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005663
NeilBrown039b7222017-04-12 16:26:13 +10005664 if (name && !dev) {
NeilBrownefeb53c2009-01-09 08:31:10 +11005665 /* Need to ensure that 'name' is not a duplicate.
5666 */
NeilBrownfd01b882011-10-11 16:47:53 +11005667 struct mddev *mddev2;
NeilBrownefeb53c2009-01-09 08:31:10 +11005668 spin_lock(&all_mddevs_lock);
5669
5670 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5671 if (mddev2->gendisk &&
5672 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5673 spin_unlock(&all_mddevs_lock);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005674 error = -EEXIST;
NeilBrown0909dc42009-07-01 12:27:21 +10005675 goto abort;
NeilBrownefeb53c2009-01-09 08:31:10 +11005676 }
5677 spin_unlock(&all_mddevs_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 }
NeilBrown039b7222017-04-12 16:26:13 +10005679 if (name && dev)
5680 /*
5681 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
5682 */
5683 mddev->hold_active = UNTIL_STOP;
NeilBrown8b765392009-01-09 08:31:08 +11005684
NeilBrown0909dc42009-07-01 12:27:21 +10005685 error = -ENOMEM;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005686 disk = blk_alloc_disk(NUMA_NO_NODE);
5687 if (!disk)
NeilBrown0909dc42009-07-01 12:27:21 +10005688 goto abort;
NeilBrown409c57f2009-03-31 14:39:39 +11005689
NeilBrownefeb53c2009-01-09 08:31:10 +11005690 disk->major = MAJOR(mddev->unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005691 disk->first_minor = unit << shift;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005692 disk->minors = 1 << shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005693 if (name)
5694 strcpy(disk->disk_name, name);
5695 else if (partitioned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005696 sprintf(disk->disk_name, "md_d%d", unit);
Greg Kroah-Hartmance7b0f462005-06-20 21:15:16 -07005697 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005698 sprintf(disk->disk_name, "md%d", unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005699 disk->fops = &md_fops;
5700 disk->private_data = mddev;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005701
5702 mddev->queue = disk->queue;
5703 blk_set_stacking_limits(&mddev->queue->limits);
Jens Axboe56883a72016-03-30 10:16:53 -06005704 blk_queue_write_cache(mddev->queue, true, true);
NeilBrown92850bb2008-10-21 13:25:32 +11005705 /* Allow extended partitions. This makes the
NeilBrownd3374822009-01-09 08:31:10 +11005706 * 'mdp' device redundant, but we can't really
NeilBrown92850bb2008-10-21 13:25:32 +11005707 * remove it now.
5708 */
5709 disk->flags |= GENHD_FL_EXT_DEVT;
Christoph Hellwiga564e232020-07-08 14:25:41 +02005710 disk->events |= DISK_EVENT_MEDIA_CHANGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 mddev->gendisk = disk;
Luis Chamberlain9be68dd2021-09-01 13:38:30 +02005712 error = add_disk(disk);
5713 if (error) {
5714 blk_cleanup_disk(disk);
5715 goto abort;
5716 }
NeilBrownb0140892011-05-10 17:49:01 +10005717
Kent Overstreet28dec872018-06-07 20:52:54 -04005718 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
NeilBrown0909dc42009-07-01 12:27:21 +10005719 if (error) {
5720 /* This isn't possible, but as kobject_init_and_add is marked
5721 * __must_check, we must do something with the result
5722 */
NeilBrown9d487392016-11-02 14:16:49 +11005723 pr_debug("md: cannot register %s/md - name in use\n",
5724 disk->disk_name);
NeilBrown0909dc42009-07-01 12:27:21 +10005725 error = 0;
5726 }
5727 abort:
NeilBrown00bcb4a2010-06-01 19:37:23 +10005728 if (!error && mddev->kobj.sd) {
Greg Kroah-Hartman3830c622007-12-17 15:54:39 -04005729 kobject_uevent(&mddev->kobj, KOBJ_ADD);
NeilBrown00bcb4a2010-06-01 19:37:23 +10005730 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07005731 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
NeilBrownb62b7592008-10-21 13:25:21 +11005732 }
Christoph Hellwig94f3cd72021-09-01 13:38:32 +02005733 mutex_unlock(&disks_mutex);
NeilBrownd3374822009-01-09 08:31:10 +11005734 mddev_put(mddev);
NeilBrown0909dc42009-07-01 12:27:21 +10005735 return error;
NeilBrownefeb53c2009-01-09 08:31:10 +11005736}
5737
Christoph Hellwig28144f92020-10-29 15:58:34 +01005738static void md_probe(dev_t dev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005739{
Christoph Hellwig28144f92020-10-29 15:58:34 +01005740 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5741 return;
NeilBrown78b63502017-04-12 16:26:13 +10005742 if (create_on_open)
5743 md_alloc(dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005744}
5745
Kees Cooke4dca7b2017-10-17 19:04:42 -07005746static int add_named_array(const char *val, const struct kernel_param *kp)
NeilBrownefeb53c2009-01-09 08:31:10 +11005747{
NeilBrown039b7222017-04-12 16:26:13 +10005748 /*
5749 * val must be "md_*" or "mdNNN".
5750 * For "md_*" we allocate an array with a large free minor number, and
NeilBrownefeb53c2009-01-09 08:31:10 +11005751 * set the name to val. val must not already be an active name.
NeilBrown039b7222017-04-12 16:26:13 +10005752 * For "mdNNN" we allocate an array with the minor number NNN
5753 * which must not already be in use.
NeilBrownefeb53c2009-01-09 08:31:10 +11005754 */
5755 int len = strlen(val);
5756 char buf[DISK_NAME_LEN];
NeilBrown039b7222017-04-12 16:26:13 +10005757 unsigned long devnum;
NeilBrownefeb53c2009-01-09 08:31:10 +11005758
5759 while (len && val[len-1] == '\n')
5760 len--;
5761 if (len >= DISK_NAME_LEN)
5762 return -E2BIG;
5763 strlcpy(buf, val, len+1);
NeilBrown039b7222017-04-12 16:26:13 +10005764 if (strncmp(buf, "md_", 3) == 0)
5765 return md_alloc(0, buf);
5766 if (strncmp(buf, "md", 2) == 0 &&
5767 isdigit(buf[2]) &&
5768 kstrtoul(buf+2, 10, &devnum) == 0 &&
5769 devnum <= MINORMASK)
5770 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5771
5772 return -EINVAL;
NeilBrownefeb53c2009-01-09 08:31:10 +11005773}
5774
Kees Cook8376d3c2017-10-16 17:01:48 -07005775static void md_safemode_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005776{
Kees Cook8376d3c2017-10-16 17:01:48 -07005777 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005778
NeilBrown4ad23a972017-03-15 14:05:14 +11005779 mddev->safemode = 1;
5780 if (mddev->external)
5781 sysfs_notify_dirent_safe(mddev->sysfs_state);
5782
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783 md_wakeup_thread(mddev->thread);
5784}
5785
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005786static int start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005787
NeilBrownfd01b882011-10-11 16:47:53 +11005788int md_run(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005789{
NeilBrown2604b702006-01-06 00:20:36 -08005790 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11005791 struct md_rdev *rdev;
NeilBrown84fc4b52011-10-11 16:49:58 +11005792 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005793
NeilBrowna757e642005-04-16 15:26:42 -07005794 if (list_empty(&mddev->disks))
5795 /* cannot run an array with no devices.. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005796 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797
5798 if (mddev->pers)
5799 return -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10005800 /* Cannot run until previous stop completes properly */
5801 if (mddev->sysfs_active)
5802 return -EBUSY;
NeilBrownb6eb1272010-04-15 10:13:47 +10005803
Linus Torvalds1da177e2005-04-16 15:20:36 -07005804 /*
5805 * Analyze all RAID superblock(s)
5806 */
NeilBrown1ec4a932008-02-06 01:39:53 -08005807 if (!mddev->raid_disks) {
5808 if (!mddev->persistent)
5809 return -EINVAL;
Yufen Yu6a5cb532019-10-16 16:00:03 +08005810 err = analyze_sbs(mddev);
5811 if (err)
5812 return -EINVAL;
NeilBrown1ec4a932008-02-06 01:39:53 -08005813 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005814
NeilBrownd9d166c2006-01-06 00:20:51 -08005815 if (mddev->level != LEVEL_NONE)
5816 request_module("md-level-%d", mddev->level);
5817 else if (mddev->clevel[0])
5818 request_module("md-%s", mddev->clevel);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005819
5820 /*
5821 * Drop all container device buffers, from now on
5822 * the only valid external interface is through the md
5823 * device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005824 */
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005825 mddev->has_superblocks = false;
NeilBrowndafb20f2012-03-19 12:46:39 +11005826 rdev_for_each(rdev, mddev) {
NeilBrownb2d444d2005-11-08 21:39:31 -08005827 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005828 continue;
5829 sync_blockdev(rdev->bdev);
Peter Zijlstraf98393a2007-05-06 14:49:54 -07005830 invalidate_bdev(rdev->bdev);
Christoph Hellwigd7a47832021-02-01 14:17:20 +01005831 if (mddev->ro != 1 && rdev_read_only(rdev)) {
NeilBrown97b20ef2017-04-13 08:53:48 +10005832 mddev->ro = 1;
5833 if (mddev->gendisk)
5834 set_disk_ro(mddev->gendisk, 1);
5835 }
NeilBrownf0d76d72007-07-17 04:06:12 -07005836
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005837 if (rdev->sb_page)
5838 mddev->has_superblocks = true;
5839
NeilBrownf0d76d72007-07-17 04:06:12 -07005840 /* perform some consistency tests on the device.
5841 * We don't want the data to overlap the metadata,
Andre Noll58c0fed2009-03-31 14:33:13 +11005842 * Internal Bitmap issues have been handled elsewhere.
NeilBrownf0d76d72007-07-17 04:06:12 -07005843 */
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +11005844 if (rdev->meta_bdev) {
5845 /* Nothing to check */;
5846 } else if (rdev->data_offset < rdev->sb_start) {
Andre Noll58c0fed2009-03-31 14:33:13 +11005847 if (mddev->dev_sectors &&
5848 rdev->data_offset + mddev->dev_sectors
Andre Noll0f420352008-07-11 22:02:23 +10005849 > rdev->sb_start) {
NeilBrown9d487392016-11-02 14:16:49 +11005850 pr_warn("md: %s: data overlaps metadata\n",
5851 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005852 return -EINVAL;
5853 }
5854 } else {
Andre Noll0f420352008-07-11 22:02:23 +10005855 if (rdev->sb_start + rdev->sb_size/512
NeilBrownf0d76d72007-07-17 04:06:12 -07005856 > rdev->data_offset) {
NeilBrown9d487392016-11-02 14:16:49 +11005857 pr_warn("md: %s: metadata overlaps data\n",
5858 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005859 return -EINVAL;
5860 }
5861 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005862 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005863 }
5864
Kent Overstreetafeee512018-05-20 18:25:52 -04005865 if (!bioset_initialized(&mddev->bio_set)) {
5866 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5867 if (err)
5868 return err;
Ming Lei10273172017-02-14 23:29:00 +08005869 }
Kent Overstreetafeee512018-05-20 18:25:52 -04005870 if (!bioset_initialized(&mddev->sync_set)) {
5871 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5872 if (err)
Guoqing Jiang10764812021-05-25 17:46:17 +08005873 goto exit_bio_set;
5874 }
Guoqing Jiangdaee2022021-06-03 17:21:06 +08005875 if (mddev->level != 1 && mddev->level != 10 &&
5876 !bioset_initialized(&mddev->io_acct_set)) {
Guoqing Jiang10764812021-05-25 17:46:17 +08005877 err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
5878 offsetof(struct md_io_acct, bio_clone), 0);
5879 if (err)
5880 goto exit_sync_set;
NeilBrown5a850712017-06-21 09:12:21 +10005881 }
NeilBrowna167f662010-10-26 18:31:13 +11005882
Linus Torvalds1da177e2005-04-16 15:20:36 -07005883 spin_lock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005884 pers = find_pers(mddev->level, mddev->clevel);
NeilBrown2604b702006-01-06 00:20:36 -08005885 if (!pers || !try_module_get(pers->owner)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005886 spin_unlock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005887 if (mddev->level != LEVEL_NONE)
NeilBrown9d487392016-11-02 14:16:49 +11005888 pr_warn("md: personality for level %d is not loaded!\n",
5889 mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08005890 else
NeilBrown9d487392016-11-02 14:16:49 +11005891 pr_warn("md: personality for level %s is not loaded!\n",
5892 mddev->clevel);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005893 err = -EINVAL;
5894 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005895 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005896 spin_unlock(&pers_lock);
NeilBrown34817e82009-03-31 14:39:38 +11005897 if (mddev->level != pers->level) {
5898 mddev->level = pers->level;
5899 mddev->new_level = pers->level;
5900 }
NeilBrownd9d166c2006-01-06 00:20:51 -08005901 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005902
NeilBrownf6705572006-03-27 01:18:11 -08005903 if (mddev->reshape_position != MaxSector &&
NeilBrown63c70c42006-03-27 01:18:13 -08005904 pers->start_reshape == NULL) {
NeilBrownf6705572006-03-27 01:18:11 -08005905 /* This personality cannot handle reshaping... */
NeilBrownf6705572006-03-27 01:18:11 -08005906 module_put(pers->owner);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005907 err = -EINVAL;
5908 goto abort;
NeilBrownf6705572006-03-27 01:18:11 -08005909 }
5910
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005911 if (pers->sync_request) {
5912 /* Warn if this is a potentially silly
5913 * configuration.
5914 */
5915 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11005916 struct md_rdev *rdev2;
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005917 int warned = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005918
NeilBrowndafb20f2012-03-19 12:46:39 +11005919 rdev_for_each(rdev, mddev)
5920 rdev_for_each(rdev2, mddev) {
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005921 if (rdev < rdev2 &&
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02005922 rdev->bdev->bd_disk ==
5923 rdev2->bdev->bd_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11005924 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5925 mdname(mddev),
5926 bdevname(rdev->bdev,b),
5927 bdevname(rdev2->bdev,b2));
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005928 warned = 1;
5929 }
5930 }
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005931
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005932 if (warned)
NeilBrown9d487392016-11-02 14:16:49 +11005933 pr_warn("True protection against single-disk failure might be compromised.\n");
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005934 }
5935
NeilBrown657390d2005-08-26 18:34:16 -07005936 mddev->recovery = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11005937 /* may be over-ridden by personality */
5938 mddev->resync_max_sectors = mddev->dev_sectors;
5939
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005940 mddev->ok_start_degraded = start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005941
NeilBrown0f9552b52009-12-30 12:08:50 +11005942 if (start_readonly && mddev->ro == 0)
NeilBrownf91de922005-11-08 21:39:36 -08005943 mddev->ro = 2; /* read-only, but switch on first write */
5944
NeilBrown36d091f2014-12-15 12:56:58 +11005945 err = pers->run(mddev);
Andre Noll13e53df2008-03-26 00:07:03 +01005946 if (err)
NeilBrown9d487392016-11-02 14:16:49 +11005947 pr_warn("md: pers->run() failed ...\n");
NeilBrown36d091f2014-12-15 12:56:58 +11005948 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
NeilBrown9d487392016-11-02 14:16:49 +11005949 WARN_ONCE(!mddev->external_size,
5950 "%s: default size too small, but 'external_size' not in effect?\n",
5951 __func__);
5952 pr_warn("md: invalid array_size %llu > default size %llu\n",
5953 (unsigned long long)mddev->array_sectors / 2,
5954 (unsigned long long)pers->size(mddev, 0, 0) / 2);
Dan Williamsb522adc2009-03-31 15:00:31 +11005955 err = -EINVAL;
Dan Williamsb522adc2009-03-31 15:00:31 +11005956 }
NeilBrown36d091f2014-12-15 12:56:58 +11005957 if (err == 0 && pers->sync_request &&
NeilBrownef99bf42012-05-22 13:55:08 +10005958 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005959 struct bitmap *bitmap;
5960
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07005961 bitmap = md_bitmap_create(mddev, -1);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005962 if (IS_ERR(bitmap)) {
5963 err = PTR_ERR(bitmap);
NeilBrown9d487392016-11-02 14:16:49 +11005964 pr_warn("%s: failed to create bitmap (%d)\n",
5965 mdname(mddev), err);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005966 } else
5967 mddev->bitmap = bitmap;
5968
NeilBrownb15c2e52006-01-06 00:20:16 -08005969 }
Guoqing Jiangd4945492019-06-14 17:10:39 +08005970 if (err)
5971 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005972
5973 if (mddev->bitmap_info.max_write_behind > 0) {
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005974 bool create_pool = false;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005975
5976 rdev_for_each(rdev, mddev) {
5977 if (test_bit(WriteMostly, &rdev->flags) &&
Guoqing Jiang404659c2019-12-23 10:48:53 +01005978 rdev_init_serial(rdev))
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005979 create_pool = true;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005980 }
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005981 if (create_pool && mddev->serial_info_pool == NULL) {
Guoqing Jiang404659c2019-12-23 10:48:53 +01005982 mddev->serial_info_pool =
5983 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
5984 sizeof(struct serial_info));
5985 if (!mddev->serial_info_pool) {
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005986 err = -ENOMEM;
Guoqing Jiangd4945492019-06-14 17:10:39 +08005987 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005988 }
5989 }
5990 }
5991
NeilBrown5c675f82014-12-15 12:56:56 +11005992 if (mddev->queue) {
Shaohua Libb086a82016-09-30 09:45:40 -07005993 bool nonrot = true;
5994
5995 rdev_for_each(rdev, mddev) {
5996 if (rdev->raid_disk >= 0 &&
5997 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5998 nonrot = false;
5999 break;
6000 }
6001 }
6002 if (mddev->degraded)
6003 nonrot = false;
6004 if (nonrot)
Bart Van Assche8b904b52018-03-07 17:10:10 -08006005 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
Shaohua Libb086a82016-09-30 09:45:40 -07006006 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08006007 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
Guoqing Jiang10764812021-05-25 17:46:17 +08006008 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
NeilBrown5c675f82014-12-15 12:56:56 +11006009 }
NeilBrown36d091f2014-12-15 12:56:58 +11006010 if (pers->sync_request) {
NeilBrown00bcb4a2010-06-01 19:37:23 +10006011 if (mddev->kobj.sd &&
6012 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11006013 pr_warn("md: cannot register extra attributes for %s\n",
6014 mdname(mddev));
NeilBrown00bcb4a2010-06-01 19:37:23 +10006015 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07006016 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6017 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrown5e55e2f2007-03-26 21:32:14 -08006018 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
NeilBrownfd9d49c2005-11-08 21:39:42 -08006019 mddev->ro = 0;
6020
Robert Becker1e509152009-12-14 12:49:58 +11006021 atomic_set(&mddev->max_corr_read_errors,
6022 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006023 mddev->safemode = 0;
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006024 if (mddev_is_clustered(mddev))
6025 mddev->safemode_delay = 0;
6026 else
Zhao Heming7c9d5c52020-07-21 02:08:52 +08006027 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006028 mddev->in_sync = 1;
NeilBrown0ca69882011-01-14 09:14:33 +11006029 smp_wmb();
NeilBrown36d091f2014-12-15 12:56:58 +11006030 spin_lock(&mddev->lock);
6031 mddev->pers = pers;
NeilBrown36d091f2014-12-15 12:56:58 +11006032 spin_unlock(&mddev->lock);
NeilBrowndafb20f2012-03-19 12:46:39 +11006033 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006034 if (rdev->raid_disk >= 0)
Yufen Yue5b521e2019-06-14 15:41:07 -07006035 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006036
NeilBrowna4a3d262015-07-17 11:57:30 +10006037 if (mddev->degraded && !mddev->ro)
6038 /* This ensures that recovering status is reported immediately
6039 * via sysfs - until a lack of spares is confirmed.
6040 */
6041 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006042 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf72ffdd2014-09-30 14:23:59 +10006043
Shaohua Li29530792016-12-08 15:48:19 -08006044 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07006045 md_update_sb(mddev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046
NeilBrownd7603b72006-01-06 00:20:30 -08006047 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006048 return 0;
Xiao Nib1261942018-01-24 12:17:38 +08006049
Guoqing Jiangd4945492019-06-14 17:10:39 +08006050bitmap_abort:
6051 mddev_detach(mddev);
6052 if (mddev->private)
6053 pers->free(mddev, mddev->private);
6054 mddev->private = NULL;
6055 module_put(pers->owner);
6056 md_bitmap_destroy(mddev);
Xiao Nib1261942018-01-24 12:17:38 +08006057abort:
Guoqing Jiangdaee2022021-06-03 17:21:06 +08006058 if (mddev->level != 1 && mddev->level != 10)
6059 bioset_exit(&mddev->io_acct_set);
Guoqing Jiang10764812021-05-25 17:46:17 +08006060exit_sync_set:
NeilBrown4bc034d2019-03-29 10:46:16 -07006061 bioset_exit(&mddev->sync_set);
Guoqing Jiang10764812021-05-25 17:46:17 +08006062exit_bio_set:
6063 bioset_exit(&mddev->bio_set);
Xiao Nib1261942018-01-24 12:17:38 +08006064 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006065}
NeilBrown390ee602010-06-01 19:37:27 +10006066EXPORT_SYMBOL_GPL(md_run);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006067
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006068int do_md_run(struct mddev *mddev)
NeilBrownfe60b012010-03-29 11:10:42 +11006069{
6070 int err;
6071
NeilBrown9d4b45d2019-08-20 10:21:09 +10006072 set_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006073 err = md_run(mddev);
6074 if (err)
6075 goto out;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006076 err = md_bitmap_load(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006077 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006078 md_bitmap_destroy(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006079 goto out;
6080 }
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006081
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006082 if (mddev_is_clustered(mddev))
6083 md_allow_write(mddev);
6084
Song Liud5d885f2017-11-19 22:17:01 -08006085 /* run start up tasks that require md_thread */
6086 md_start(mddev);
6087
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006088 md_wakeup_thread(mddev->thread);
6089 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6090
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006091 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006092 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006093 mddev->changed = 1;
NeilBrownfe60b012010-03-29 11:10:42 +11006094 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006095 sysfs_notify_dirent_safe(mddev->sysfs_state);
6096 sysfs_notify_dirent_safe(mddev->sysfs_action);
Junxiao Bie1a86db2020-07-14 16:10:26 -07006097 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownfe60b012010-03-29 11:10:42 +11006098out:
NeilBrown9d4b45d2019-08-20 10:21:09 +10006099 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006100 return err;
6101}
6102
Song Liud5d885f2017-11-19 22:17:01 -08006103int md_start(struct mddev *mddev)
6104{
6105 int ret = 0;
6106
6107 if (mddev->pers->start) {
6108 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6109 md_wakeup_thread(mddev->thread);
6110 ret = mddev->pers->start(mddev);
6111 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6112 md_wakeup_thread(mddev->sync_thread);
6113 }
6114 return ret;
6115}
6116EXPORT_SYMBOL_GPL(md_start);
6117
NeilBrownfd01b882011-10-11 16:47:53 +11006118static int restart_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006119{
6120 struct gendisk *disk = mddev->gendisk;
NeilBrown97b20ef2017-04-13 08:53:48 +10006121 struct md_rdev *rdev;
6122 bool has_journal = false;
6123 bool has_readonly = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006124
Andre Noll80fab1d2008-07-11 22:02:21 +10006125 /* Complain if it has no devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006126 if (list_empty(&mddev->disks))
Andre Noll80fab1d2008-07-11 22:02:21 +10006127 return -ENXIO;
6128 if (!mddev->pers)
6129 return -EINVAL;
6130 if (!mddev->ro)
6131 return -EBUSY;
Song Liu339421d2015-10-08 21:54:13 -07006132
NeilBrown97b20ef2017-04-13 08:53:48 +10006133 rcu_read_lock();
6134 rdev_for_each_rcu(rdev, mddev) {
6135 if (test_bit(Journal, &rdev->flags) &&
6136 !test_bit(Faulty, &rdev->flags))
6137 has_journal = true;
Christoph Hellwiga42e0d72021-02-01 14:17:21 +01006138 if (rdev_read_only(rdev))
NeilBrown97b20ef2017-04-13 08:53:48 +10006139 has_readonly = true;
Song Liu339421d2015-10-08 21:54:13 -07006140 }
NeilBrown97b20ef2017-04-13 08:53:48 +10006141 rcu_read_unlock();
6142 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6143 /* Don't restart rw with journal missing/faulty */
6144 return -EINVAL;
6145 if (has_readonly)
6146 return -EROFS;
Song Liu339421d2015-10-08 21:54:13 -07006147
Andre Noll80fab1d2008-07-11 22:02:21 +10006148 mddev->safemode = 0;
6149 mddev->ro = 0;
6150 set_disk_ro(disk, 0);
NeilBrown9d487392016-11-02 14:16:49 +11006151 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
Andre Noll80fab1d2008-07-11 22:02:21 +10006152 /* Kick recovery or resync if necessary */
6153 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6154 md_wakeup_thread(mddev->thread);
6155 md_wakeup_thread(mddev->sync_thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006156 sysfs_notify_dirent_safe(mddev->sysfs_state);
Andre Noll80fab1d2008-07-11 22:02:21 +10006157 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006158}
6159
NeilBrownfd01b882011-10-11 16:47:53 +11006160static void md_clean(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006161{
6162 mddev->array_sectors = 0;
6163 mddev->external_size = 0;
6164 mddev->dev_sectors = 0;
6165 mddev->raid_disks = 0;
6166 mddev->recovery_cp = 0;
6167 mddev->resync_min = 0;
6168 mddev->resync_max = MaxSector;
6169 mddev->reshape_position = MaxSector;
6170 mddev->external = 0;
6171 mddev->persistent = 0;
6172 mddev->level = LEVEL_NONE;
6173 mddev->clevel[0] = 0;
6174 mddev->flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08006175 mddev->sb_flags = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006176 mddev->ro = 0;
6177 mddev->metadata_type[0] = 0;
6178 mddev->chunk_sectors = 0;
6179 mddev->ctime = mddev->utime = 0;
6180 mddev->layout = 0;
6181 mddev->max_disks = 0;
6182 mddev->events = 0;
NeilBrowna8707c02010-05-18 09:28:43 +10006183 mddev->can_decrease_events = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006184 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10006185 mddev->reshape_backwards = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006186 mddev->new_level = LEVEL_NONE;
6187 mddev->new_layout = 0;
6188 mddev->new_chunk_sectors = 0;
6189 mddev->curr_resync = 0;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11006190 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown6177b472010-03-29 11:37:13 +11006191 mddev->suspend_lo = mddev->suspend_hi = 0;
6192 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6193 mddev->recovery = 0;
6194 mddev->in_sync = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006195 mddev->changed = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006196 mddev->degraded = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006197 mddev->safemode = 0;
NeilBrownbd691922015-06-25 17:01:40 +10006198 mddev->private = NULL;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006199 mddev->cluster_info = NULL;
NeilBrown6177b472010-03-29 11:37:13 +11006200 mddev->bitmap_info.offset = 0;
6201 mddev->bitmap_info.default_offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10006202 mddev->bitmap_info.default_space = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006203 mddev->bitmap_info.chunksize = 0;
6204 mddev->bitmap_info.daemon_sleep = 0;
6205 mddev->bitmap_info.max_write_behind = 0;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006206 mddev->bitmap_info.nodes = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006207}
6208
NeilBrownfd01b882011-10-11 16:47:53 +11006209static void __md_stop_writes(struct mddev *mddev)
NeilBrowna047e122010-03-29 12:07:53 +11006210{
NeilBrown6b6204e2013-05-09 09:48:30 +10006211 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Guoqing Jiang21e09582020-04-04 23:57:07 +02006212 if (work_pending(&mddev->del_work))
6213 flush_workqueue(md_misc_wq);
NeilBrowna047e122010-03-29 12:07:53 +11006214 if (mddev->sync_thread) {
NeilBrowna047e122010-03-29 12:07:53 +11006215 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10006216 md_reap_sync_thread(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006217 }
6218
6219 del_timer_sync(&mddev->safemode_timer);
6220
Shaohua Li034e33f2016-11-21 10:29:19 -08006221 if (mddev->pers && mddev->pers->quiesce) {
6222 mddev->pers->quiesce(mddev, 1);
6223 mddev->pers->quiesce(mddev, 0);
6224 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006225 md_bitmap_flush(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006226
NeilBrownb6d428c2013-04-24 11:42:42 +10006227 if (mddev->ro == 0 &&
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006228 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
Shaohua Li29530792016-12-08 15:48:19 -08006229 mddev->sb_flags)) {
NeilBrowna047e122010-03-29 12:07:53 +11006230 /* mark array as shutdown cleanly */
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006231 if (!mddev_is_clustered(mddev))
6232 mddev->in_sync = 1;
NeilBrowna047e122010-03-29 12:07:53 +11006233 md_update_sb(mddev, 1);
6234 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +01006235 /* disable policy to guarantee rdevs free resources for serialization */
6236 mddev->serialize_policy = 0;
6237 mddev_destroy_serial_pool(mddev, NULL, true);
NeilBrowna047e122010-03-29 12:07:53 +11006238}
NeilBrowndefad612011-01-14 09:14:33 +11006239
NeilBrownfd01b882011-10-11 16:47:53 +11006240void md_stop_writes(struct mddev *mddev)
NeilBrowndefad612011-01-14 09:14:33 +11006241{
NeilBrown29f097c2013-11-14 17:54:51 +11006242 mddev_lock_nointr(mddev);
NeilBrowndefad612011-01-14 09:14:33 +11006243 __md_stop_writes(mddev);
6244 mddev_unlock(mddev);
6245}
NeilBrown390ee602010-06-01 19:37:27 +10006246EXPORT_SYMBOL_GPL(md_stop_writes);
NeilBrowna047e122010-03-29 12:07:53 +11006247
NeilBrown5aa61f42014-12-15 12:56:57 +11006248static void mddev_detach(struct mddev *mddev)
6249{
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006250 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang6b40bec2020-02-11 11:10:04 +01006251 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
NeilBrown5aa61f42014-12-15 12:56:57 +11006252 mddev->pers->quiesce(mddev, 1);
6253 mddev->pers->quiesce(mddev, 0);
6254 }
6255 md_unregister_thread(&mddev->thread);
6256 if (mddev->queue)
6257 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6258}
6259
NeilBrown5eff3c42012-11-19 10:47:48 +11006260static void __md_stop(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006261{
NeilBrown36d091f2014-12-15 12:56:58 +11006262 struct md_personality *pers = mddev->pers;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006263 md_bitmap_destroy(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11006264 mddev_detach(mddev);
NeilBrownee5d0042015-07-22 10:20:07 +10006265 /* Ensure ->event_work is done */
Guoqing Jiang21e09582020-04-04 23:57:07 +02006266 if (mddev->event_work.func)
6267 flush_workqueue(md_misc_wq);
NeilBrown36d091f2014-12-15 12:56:58 +11006268 spin_lock(&mddev->lock);
NeilBrown6177b472010-03-29 11:37:13 +11006269 mddev->pers = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006270 spin_unlock(&mddev->lock);
6271 pers->free(mddev, mddev->private);
NeilBrownbd691922015-06-25 17:01:40 +10006272 mddev->private = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006273 if (pers->sync_request && mddev->to_remove == NULL)
6274 mddev->to_remove = &md_redundancy_group;
6275 module_put(pers->owner);
NeilBrowncca9cf92010-04-01 12:08:16 +11006276 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Jack Wang6aaa58c2018-10-19 16:21:31 +02006277}
6278
6279void md_stop(struct mddev *mddev)
6280{
6281 /* stop the array and free an attached data structures.
6282 * This is called from dm-raid
6283 */
6284 __md_stop(mddev);
Kent Overstreetafeee512018-05-20 18:25:52 -04006285 bioset_exit(&mddev->bio_set);
6286 bioset_exit(&mddev->sync_set);
Guoqing Jiangdaee2022021-06-03 17:21:06 +08006287 if (mddev->level != 1 && mddev->level != 10)
6288 bioset_exit(&mddev->io_acct_set);
NeilBrown5eff3c42012-11-19 10:47:48 +11006289}
6290
NeilBrown390ee602010-06-01 19:37:27 +10006291EXPORT_SYMBOL_GPL(md_stop);
NeilBrown6177b472010-03-29 11:37:13 +11006292
NeilBrowna05b7ea2012-07-19 15:59:18 +10006293static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
NeilBrowna4bd82d2010-03-29 13:23:10 +11006294{
6295 int err = 0;
NeilBrown30b8feb2013-11-14 15:16:17 +11006296 int did_freeze = 0;
6297
6298 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6299 did_freeze = 1;
6300 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6301 md_wakeup_thread(mddev->thread);
6302 }
NeilBrownf851b602014-12-11 10:02:10 +11006303 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006304 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006305 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006306 /* Thread might be blocked waiting for metadata update
6307 * which will now never happen */
6308 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006309
Shaohua Li29530792016-12-08 15:48:19 -08006310 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrown88724bf2015-09-24 14:00:51 +10006311 return -EBUSY;
NeilBrown30b8feb2013-11-14 15:16:17 +11006312 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006313 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6314 &mddev->recovery));
NeilBrown88724bf2015-09-24 14:00:51 +10006315 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08006316 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown30b8feb2013-11-14 15:16:17 +11006317 mddev_lock_nointr(mddev);
6318
NeilBrowna4bd82d2010-03-29 13:23:10 +11006319 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006320 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006321 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006322 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006323 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown30b8feb2013-11-14 15:16:17 +11006324 if (did_freeze) {
6325 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006326 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006327 md_wakeup_thread(mddev->thread);
6328 }
NeilBrowna4bd82d2010-03-29 13:23:10 +11006329 err = -EBUSY;
6330 goto out;
6331 }
6332 if (mddev->pers) {
NeilBrowndefad612011-01-14 09:14:33 +11006333 __md_stop_writes(mddev);
NeilBrowna4bd82d2010-03-29 13:23:10 +11006334
6335 err = -ENXIO;
6336 if (mddev->ro==1)
6337 goto out;
6338 mddev->ro = 1;
6339 set_disk_ro(mddev->gendisk, 1);
6340 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006341 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6342 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006343 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown30b8feb2013-11-14 15:16:17 +11006344 err = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006345 }
6346out:
6347 mutex_unlock(&mddev->open_mutex);
6348 return err;
6349}
6350
NeilBrown9e653b62006-06-26 00:27:58 -07006351/* mode:
6352 * 0 - completely stop and dis-assemble array
NeilBrown9e653b62006-06-26 00:27:58 -07006353 * 2 - stop but do not disassemble array
6354 */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006355static int do_md_stop(struct mddev *mddev, int mode,
NeilBrowna05b7ea2012-07-19 15:59:18 +10006356 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006357{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006358 struct gendisk *disk = mddev->gendisk;
NeilBrown3cb03002011-10-11 16:45:26 +11006359 struct md_rdev *rdev;
NeilBrown30b8feb2013-11-14 15:16:17 +11006360 int did_freeze = 0;
6361
6362 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6363 did_freeze = 1;
6364 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6365 md_wakeup_thread(mddev->thread);
6366 }
NeilBrownf851b602014-12-11 10:02:10 +11006367 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006368 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006369 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006370 /* Thread might be blocked waiting for metadata update
6371 * which will now never happen */
6372 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006373
NeilBrown30b8feb2013-11-14 15:16:17 +11006374 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006375 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6376 !test_bit(MD_RECOVERY_RUNNING,
6377 &mddev->recovery)));
NeilBrown30b8feb2013-11-14 15:16:17 +11006378 mddev_lock_nointr(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006379
NeilBrownc8c00a62009-08-10 12:50:52 +10006380 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006381 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006382 mddev->sysfs_active ||
6383 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006384 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006385 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown6e17b022010-08-07 21:41:19 +10006386 mutex_unlock(&mddev->open_mutex);
NeilBrown30b8feb2013-11-14 15:16:17 +11006387 if (did_freeze) {
6388 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006389 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006390 md_wakeup_thread(mddev->thread);
6391 }
NeilBrown260fa032013-08-27 16:44:13 +10006392 return -EBUSY;
6393 }
NeilBrown6e17b022010-08-07 21:41:19 +10006394 if (mddev->pers) {
NeilBrowna4bd82d2010-03-29 13:23:10 +11006395 if (mddev->ro)
6396 set_disk_ro(disk, 0);
NeilBrown409c57f2009-03-31 14:39:39 +11006397
NeilBrowndefad612011-01-14 09:14:33 +11006398 __md_stop_writes(mddev);
NeilBrown5eff3c42012-11-19 10:47:48 +11006399 __md_stop(mddev);
NeilBrown6177b472010-03-29 11:37:13 +11006400
NeilBrowna4bd82d2010-03-29 13:23:10 +11006401 /* tell userspace to handle 'inactive' */
NeilBrown00bcb4a2010-06-01 19:37:23 +10006402 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown0d4ca602006-12-10 02:20:44 -08006403
NeilBrowndafb20f2012-03-19 12:46:39 +11006404 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006405 if (rdev->raid_disk >= 0)
6406 sysfs_unlink_rdev(mddev, rdev);
NeilBrownc4647292009-05-07 12:51:06 +10006407
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006408 set_capacity_and_notify(disk, 0);
NeilBrown6e17b022010-08-07 21:41:19 +10006409 mutex_unlock(&mddev->open_mutex);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006410 mddev->changed = 1;
NeilBrown0d4ca602006-12-10 02:20:44 -08006411
NeilBrowna4bd82d2010-03-29 13:23:10 +11006412 if (mddev->ro)
6413 mddev->ro = 0;
NeilBrown6e17b022010-08-07 21:41:19 +10006414 } else
6415 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006416 /*
6417 * Free resources if final stop
6418 */
NeilBrown9e653b62006-06-26 00:27:58 -07006419 if (mode == 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006420 pr_info("md: %s stopped.\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006421
NeilBrownc3d97142009-12-14 12:49:52 +11006422 if (mddev->bitmap_info.file) {
NeilBrown4af1a042014-12-15 12:57:00 +11006423 struct file *f = mddev->bitmap_info.file;
6424 spin_lock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11006425 mddev->bitmap_info.file = NULL;
NeilBrown4af1a042014-12-15 12:57:00 +11006426 spin_unlock(&mddev->lock);
6427 fput(f);
NeilBrown978f9462006-02-02 14:28:05 -08006428 }
NeilBrownc3d97142009-12-14 12:49:52 +11006429 mddev->bitmap_info.offset = 0;
NeilBrown978f9462006-02-02 14:28:05 -08006430
Linus Torvalds1da177e2005-04-16 15:20:36 -07006431 export_array(mddev);
6432
NeilBrown6177b472010-03-29 11:37:13 +11006433 md_clean(mddev);
NeilBrownefeb53c2009-01-09 08:31:10 +11006434 if (mddev->hold_active == UNTIL_STOP)
6435 mddev->hold_active = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006436 }
NeilBrownd7603b72006-01-06 00:20:30 -08006437 md_new_event(mddev);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006438 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown6e17b022010-08-07 21:41:19 +10006439 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006440}
6441
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006442#ifndef MODULE
NeilBrownfd01b882011-10-11 16:47:53 +11006443static void autorun_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006444{
NeilBrown3cb03002011-10-11 16:45:26 +11006445 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006446 int err;
6447
NeilBrowna757e642005-04-16 15:26:42 -07006448 if (list_empty(&mddev->disks))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006449 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006450
NeilBrown9d487392016-11-02 14:16:49 +11006451 pr_info("md: running: ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006452
NeilBrowndafb20f2012-03-19 12:46:39 +11006453 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006454 char b[BDEVNAME_SIZE];
NeilBrown9d487392016-11-02 14:16:49 +11006455 pr_cont("<%s>", bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006456 }
NeilBrown9d487392016-11-02 14:16:49 +11006457 pr_cont("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006458
NeilBrownd710e132008-10-13 11:55:12 +11006459 err = do_md_run(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006460 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11006461 pr_warn("md: do_md_run() returned %d\n", err);
NeilBrowna05b7ea2012-07-19 15:59:18 +10006462 do_md_stop(mddev, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006463 }
6464}
6465
6466/*
6467 * lets try to run arrays based on all disks that have arrived
6468 * until now. (those are in pending_raid_disks)
6469 *
6470 * the method: pick the first pending disk, collect all disks with
6471 * the same UUID, remove all from the pending list and put them into
6472 * the 'same_array' list. Then order this list based on superblock
6473 * update time (freshest comes first), kick out 'old' disks and
6474 * compare superblocks. If everything's fine then run it.
6475 *
6476 * If "unit" is allocated, then bump its reference count
6477 */
6478static void autorun_devices(int part)
6479{
NeilBrown3cb03002011-10-11 16:45:26 +11006480 struct md_rdev *rdev0, *rdev, *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11006481 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006482 char b[BDEVNAME_SIZE];
6483
NeilBrown9d487392016-11-02 14:16:49 +11006484 pr_info("md: autorun ...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006485 while (!list_empty(&pending_raid_disks)) {
NeilBrowne8703fe2006-10-03 01:15:59 -07006486 int unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487 dev_t dev;
NeilBrownad01c9e2006-03-27 01:18:07 -08006488 LIST_HEAD(candidates);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006489 rdev0 = list_entry(pending_raid_disks.next,
NeilBrown3cb03002011-10-11 16:45:26 +11006490 struct md_rdev, same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006491
NeilBrown9d487392016-11-02 14:16:49 +11006492 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006493 INIT_LIST_HEAD(&candidates);
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006494 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006495 if (super_90_load(rdev, rdev0, 0) >= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006496 pr_debug("md: adding %s ...\n",
6497 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006498 list_move(&rdev->same_set, &candidates);
6499 }
6500 /*
6501 * now we have a set of devices, with all of them having
6502 * mostly sane superblocks. It's time to allocate the
6503 * mddev.
6504 */
NeilBrowne8703fe2006-10-03 01:15:59 -07006505 if (part) {
6506 dev = MKDEV(mdp_major,
6507 rdev0->preferred_minor << MdpMinorShift);
6508 unit = MINOR(dev) >> MdpMinorShift;
6509 } else {
6510 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6511 unit = MINOR(dev);
6512 }
6513 if (rdev0->preferred_minor != unit) {
NeilBrown9d487392016-11-02 14:16:49 +11006514 pr_warn("md: unit number in %s is bad: %d\n",
6515 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006516 break;
6517 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006518
Christoph Hellwig28144f92020-10-29 15:58:34 +01006519 md_probe(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006520 mddev = mddev_find(dev);
Christoph Hellwig65aa97c2021-04-03 18:15:29 +02006521 if (!mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006522 break;
Christoph Hellwig65aa97c2021-04-03 18:15:29 +02006523
NeilBrownf72ffdd2014-09-30 14:23:59 +10006524 if (mddev_lock(mddev))
NeilBrown9d487392016-11-02 14:16:49 +11006525 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006526 else if (mddev->raid_disks || mddev->major_version
6527 || !list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11006528 pr_warn("md: %s already running, cannot run %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006529 mdname(mddev), bdevname(rdev0->bdev,b));
6530 mddev_unlock(mddev);
6531 } else {
NeilBrown9d487392016-11-02 14:16:49 +11006532 pr_debug("md: created %s\n", mdname(mddev));
NeilBrown1ec4a932008-02-06 01:39:53 -08006533 mddev->persistent = 1;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006534 rdev_for_each_list(rdev, tmp, &candidates) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006535 list_del_init(&rdev->same_set);
6536 if (bind_rdev_to_array(rdev, mddev))
6537 export_rdev(rdev);
6538 }
6539 autorun_array(mddev);
6540 mddev_unlock(mddev);
6541 }
6542 /* on success, candidates will be empty, on error
6543 * it won't...
6544 */
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006545 rdev_for_each_list(rdev, tmp, &candidates) {
NeilBrown4b809912008-07-21 17:05:25 +10006546 list_del_init(&rdev->same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006547 export_rdev(rdev);
NeilBrown4b809912008-07-21 17:05:25 +10006548 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006549 mddev_put(mddev);
6550 }
NeilBrown9d487392016-11-02 14:16:49 +11006551 pr_info("md: ... autorun DONE.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006552}
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006553#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006554
NeilBrownf72ffdd2014-09-30 14:23:59 +10006555static int get_version(void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006556{
6557 mdu_version_t ver;
6558
6559 ver.major = MD_MAJOR_VERSION;
6560 ver.minor = MD_MINOR_VERSION;
6561 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6562
6563 if (copy_to_user(arg, &ver, sizeof(ver)))
6564 return -EFAULT;
6565
6566 return 0;
6567}
6568
NeilBrownf72ffdd2014-09-30 14:23:59 +10006569static int get_array_info(struct mddev *mddev, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570{
6571 mdu_array_info_t info;
NeilBrowna9f326e2009-09-23 18:06:41 +10006572 int nr,working,insync,failed,spare;
NeilBrown3cb03002011-10-11 16:45:26 +11006573 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574
NeilBrown1ca69c42012-10-11 13:37:33 +11006575 nr = working = insync = failed = spare = 0;
6576 rcu_read_lock();
6577 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006578 nr++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006579 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006580 failed++;
6581 else {
6582 working++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006583 if (test_bit(In_sync, &rdev->flags))
NeilBrownf72ffdd2014-09-30 14:23:59 +10006584 insync++;
Song Liub347af82016-08-11 17:14:45 -07006585 else if (test_bit(Journal, &rdev->flags))
6586 /* TODO: add journal count to md_u.h */
6587 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006588 else
6589 spare++;
6590 }
6591 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006592 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593
6594 info.major_version = mddev->major_version;
6595 info.minor_version = mddev->minor_version;
6596 info.patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006597 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006598 info.level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11006599 info.size = mddev->dev_sectors / 2;
6600 if (info.size != mddev->dev_sectors / 2) /* overflow */
NeilBrown284ae7c2006-02-03 03:03:40 -08006601 info.size = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006602 info.nr_disks = nr;
6603 info.raid_disks = mddev->raid_disks;
6604 info.md_minor = mddev->md_minor;
6605 info.not_persistent= !mddev->persistent;
6606
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006607 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006608 info.state = 0;
6609 if (mddev->in_sync)
6610 info.state = (1<<MD_SB_CLEAN);
NeilBrownc3d97142009-12-14 12:49:52 +11006611 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown9bd35922014-07-02 11:35:06 +10006612 info.state |= (1<<MD_SB_BITMAP_PRESENT);
Goldwyn Rodriguesca8895d2014-11-26 12:22:03 -06006613 if (mddev_is_clustered(mddev))
6614 info.state |= (1<<MD_SB_CLUSTERED);
NeilBrowna9f326e2009-09-23 18:06:41 +10006615 info.active_disks = insync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006616 info.working_disks = working;
6617 info.failed_disks = failed;
6618 info.spare_disks = spare;
6619
6620 info.layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10006621 info.chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006622
6623 if (copy_to_user(arg, &info, sizeof(info)))
6624 return -EFAULT;
6625
6626 return 0;
6627}
6628
NeilBrownf72ffdd2014-09-30 14:23:59 +10006629static int get_bitmap_file(struct mddev *mddev, void __user * arg)
NeilBrown32a76272005-06-21 17:17:14 -07006630{
6631 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
NeilBrownf4ad3d32014-12-15 12:57:00 +11006632 char *ptr;
NeilBrown4af1a042014-12-15 12:57:00 +11006633 int err;
NeilBrown32a76272005-06-21 17:17:14 -07006634
Benjamin Randazzob6878d92015-07-25 16:36:50 +02006635 file = kzalloc(sizeof(*file), GFP_NOIO);
NeilBrown32a76272005-06-21 17:17:14 -07006636 if (!file)
NeilBrown4af1a042014-12-15 12:57:00 +11006637 return -ENOMEM;
NeilBrown32a76272005-06-21 17:17:14 -07006638
NeilBrown32a76272005-06-21 17:17:14 -07006639 err = 0;
NeilBrown4af1a042014-12-15 12:57:00 +11006640 spin_lock(&mddev->lock);
Benjamin Randazzo25eafe12015-07-25 16:36:50 +02006641 /* bitmap enabled */
6642 if (mddev->bitmap_info.file) {
6643 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6644 sizeof(file->pathname));
6645 if (IS_ERR(ptr))
6646 err = PTR_ERR(ptr);
6647 else
6648 memmove(file->pathname, ptr,
6649 sizeof(file->pathname)-(ptr-file->pathname));
6650 }
NeilBrown4af1a042014-12-15 12:57:00 +11006651 spin_unlock(&mddev->lock);
6652
6653 if (err == 0 &&
6654 copy_to_user(arg, file, sizeof(*file)))
NeilBrown32a76272005-06-21 17:17:14 -07006655 err = -EFAULT;
NeilBrown4af1a042014-12-15 12:57:00 +11006656
NeilBrown32a76272005-06-21 17:17:14 -07006657 kfree(file);
6658 return err;
6659}
6660
NeilBrownf72ffdd2014-09-30 14:23:59 +10006661static int get_disk_info(struct mddev *mddev, void __user * arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006662{
6663 mdu_disk_info_t info;
NeilBrown3cb03002011-10-11 16:45:26 +11006664 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006665
6666 if (copy_from_user(&info, arg, sizeof(info)))
6667 return -EFAULT;
6668
NeilBrown1ca69c42012-10-11 13:37:33 +11006669 rcu_read_lock();
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05006670 rdev = md_find_rdev_nr_rcu(mddev, info.number);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006671 if (rdev) {
6672 info.major = MAJOR(rdev->bdev->bd_dev);
6673 info.minor = MINOR(rdev->bdev->bd_dev);
6674 info.raid_disk = rdev->raid_disk;
6675 info.state = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08006676 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006677 info.state |= (1<<MD_DISK_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08006678 else if (test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679 info.state |= (1<<MD_DISK_ACTIVE);
6680 info.state |= (1<<MD_DISK_SYNC);
6681 }
Shaohua Li9efdca12015-10-12 16:59:50 -07006682 if (test_bit(Journal, &rdev->flags))
Song Liubac624f2015-08-13 14:31:55 -07006683 info.state |= (1<<MD_DISK_JOURNAL);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006684 if (test_bit(WriteMostly, &rdev->flags))
6685 info.state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11006686 if (test_bit(FailFast, &rdev->flags))
6687 info.state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006688 } else {
6689 info.major = info.minor = 0;
6690 info.raid_disk = -1;
6691 info.state = (1<<MD_DISK_REMOVED);
6692 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006693 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006694
6695 if (copy_to_user(arg, &info, sizeof(info)))
6696 return -EFAULT;
6697
6698 return 0;
6699}
6700
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006701int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006702{
6703 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006704 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006705 dev_t dev = MKDEV(info->major,info->minor);
6706
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006707 if (mddev_is_clustered(mddev) &&
6708 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
NeilBrown9d487392016-11-02 14:16:49 +11006709 pr_warn("%s: Cannot add to clustered mddev.\n",
6710 mdname(mddev));
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006711 return -EINVAL;
6712 }
6713
Linus Torvalds1da177e2005-04-16 15:20:36 -07006714 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6715 return -EOVERFLOW;
6716
6717 if (!mddev->raid_disks) {
6718 int err;
6719 /* expecting a device which has a superblock */
6720 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6721 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006722 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006723 PTR_ERR(rdev));
6724 return PTR_ERR(rdev);
6725 }
6726 if (!list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11006727 struct md_rdev *rdev0
6728 = list_entry(mddev->disks.next,
6729 struct md_rdev, same_set);
NeilBrowna9f326e2009-09-23 18:06:41 +10006730 err = super_types[mddev->major_version]
Linus Torvalds1da177e2005-04-16 15:20:36 -07006731 .load_super(rdev, rdev0, mddev->minor_version);
6732 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006733 pr_warn("md: %s has different UUID to %s\n",
NeilBrownf72ffdd2014-09-30 14:23:59 +10006734 bdevname(rdev->bdev,b),
Linus Torvalds1da177e2005-04-16 15:20:36 -07006735 bdevname(rdev0->bdev,b2));
6736 export_rdev(rdev);
6737 return -EINVAL;
6738 }
6739 }
6740 err = bind_rdev_to_array(rdev, mddev);
6741 if (err)
6742 export_rdev(rdev);
6743 return err;
6744 }
6745
6746 /*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006747 * md_add_new_disk can be used once the array is assembled
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748 * to add "hot spares". They must already have a superblock
6749 * written
6750 */
6751 if (mddev->pers) {
6752 int err;
6753 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006754 pr_warn("%s: personality does not support diskops!\n",
6755 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756 return -EINVAL;
6757 }
NeilBrown7b1e35f2005-09-09 16:23:50 -07006758 if (mddev->persistent)
6759 rdev = md_import_device(dev, mddev->major_version,
6760 mddev->minor_version);
6761 else
6762 rdev = md_import_device(dev, -1, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006763 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006764 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765 PTR_ERR(rdev));
6766 return PTR_ERR(rdev);
6767 }
NeilBrown1a855a02010-12-09 16:36:28 +11006768 /* set saved_raid_disk if appropriate */
NeilBrown41158c72005-06-21 17:17:25 -07006769 if (!mddev->persistent) {
6770 if (info->state & (1<<MD_DISK_SYNC) &&
NeilBrownbf572542011-01-12 09:03:35 +11006771 info->raid_disk < mddev->raid_disks) {
NeilBrown41158c72005-06-21 17:17:25 -07006772 rdev->raid_disk = info->raid_disk;
NeilBrownbf572542011-01-12 09:03:35 +11006773 set_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11006774 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownbf572542011-01-12 09:03:35 +11006775 } else
NeilBrown41158c72005-06-21 17:17:25 -07006776 rdev->raid_disk = -1;
NeilBrownf4667222013-12-09 12:04:56 +11006777 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrown41158c72005-06-21 17:17:25 -07006778 } else
6779 super_types[mddev->major_version].
6780 validate_super(mddev, rdev);
NeilBrownbedd86b2011-05-11 14:26:20 +10006781 if ((info->state & (1<<MD_DISK_SYNC)) &&
NeilBrownf4563092012-07-03 15:59:06 +10006782 rdev->raid_disk != info->raid_disk) {
NeilBrownbedd86b2011-05-11 14:26:20 +10006783 /* This was a hot-add request, but events doesn't
6784 * match, so reject it.
6785 */
6786 export_rdev(rdev);
6787 return -EINVAL;
6788 }
6789
NeilBrownb2d444d2005-11-08 21:39:31 -08006790 clear_bit(In_sync, &rdev->flags); /* just to be sure */
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006791 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6792 set_bit(WriteMostly, &rdev->flags);
NeilBrown575a80f2009-03-31 14:33:13 +11006793 else
6794 clear_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006795 if (info->state & (1<<MD_DISK_FAILFAST))
6796 set_bit(FailFast, &rdev->flags);
6797 else
6798 clear_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006799
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006800 if (info->state & (1<<MD_DISK_JOURNAL)) {
6801 struct md_rdev *rdev2;
6802 bool has_journal = false;
6803
6804 /* make sure no existing journal disk */
6805 rdev_for_each(rdev2, mddev) {
6806 if (test_bit(Journal, &rdev2->flags)) {
6807 has_journal = true;
6808 break;
6809 }
6810 }
NeilBrown230b55f2017-10-17 14:24:09 +11006811 if (has_journal || mddev->bitmap) {
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006812 export_rdev(rdev);
6813 return -EBUSY;
6814 }
Song Liubac624f2015-08-13 14:31:55 -07006815 set_bit(Journal, &rdev->flags);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006816 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006817 /*
6818 * check whether the device shows up in other nodes
6819 */
6820 if (mddev_is_clustered(mddev)) {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006821 if (info->state & (1 << MD_DISK_CANDIDATE))
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006822 set_bit(Candidate, &rdev->flags);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006823 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006824 /* --add initiated by this node */
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006825 err = md_cluster_ops->add_new_disk(mddev, rdev);
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006826 if (err) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006827 export_rdev(rdev);
6828 return err;
6829 }
6830 }
6831 }
6832
Linus Torvalds1da177e2005-04-16 15:20:36 -07006833 rdev->raid_disk = -1;
6834 err = bind_rdev_to_array(rdev, mddev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006835
Linus Torvalds1da177e2005-04-16 15:20:36 -07006836 if (err)
6837 export_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006838
6839 if (mddev_is_clustered(mddev)) {
Guoqing Jiange566aef2016-08-12 13:42:34 +08006840 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6841 if (!err) {
6842 err = md_cluster_ops->new_disk_ack(mddev,
6843 err == 0);
6844 if (err)
6845 md_kick_rdev_from_array(rdev);
6846 }
6847 } else {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006848 if (err)
6849 md_cluster_ops->add_new_disk_cancel(mddev);
6850 else
6851 err = add_bound_rdev(rdev);
6852 }
6853
6854 } else if (!err)
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05006855 err = add_bound_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006856
Linus Torvalds1da177e2005-04-16 15:20:36 -07006857 return err;
6858 }
6859
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006860 /* otherwise, md_add_new_disk is only allowed
Linus Torvalds1da177e2005-04-16 15:20:36 -07006861 * for major_version==0 superblocks
6862 */
6863 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006864 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006865 return -EINVAL;
6866 }
6867
6868 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6869 int err;
NeilBrownd710e132008-10-13 11:55:12 +11006870 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006871 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006872 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006873 PTR_ERR(rdev));
6874 return PTR_ERR(rdev);
6875 }
6876 rdev->desc_nr = info->number;
6877 if (info->raid_disk < mddev->raid_disks)
6878 rdev->raid_disk = info->raid_disk;
6879 else
6880 rdev->raid_disk = -1;
6881
Linus Torvalds1da177e2005-04-16 15:20:36 -07006882 if (rdev->raid_disk < mddev->raid_disks)
NeilBrownb2d444d2005-11-08 21:39:31 -08006883 if (info->state & (1<<MD_DISK_SYNC))
6884 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006885
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006886 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6887 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006888 if (info->state & (1<<MD_DISK_FAILFAST))
6889 set_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006890
Linus Torvalds1da177e2005-04-16 15:20:36 -07006891 if (!mddev->persistent) {
NeilBrown9d487392016-11-02 14:16:49 +11006892 pr_debug("md: nonpersistent superblock ...\n");
Mike Snitzer77304d22010-11-08 14:39:12 +01006893 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6894 } else
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006895 rdev->sb_start = calc_dev_sboffset(rdev);
NeilBrown8190e752009-06-18 08:48:58 +10006896 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006897
NeilBrown2bf071b2006-01-06 00:20:55 -08006898 err = bind_rdev_to_array(rdev, mddev);
6899 if (err) {
6900 export_rdev(rdev);
6901 return err;
6902 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006903 }
6904
6905 return 0;
6906}
6907
NeilBrownf72ffdd2014-09-30 14:23:59 +10006908static int hot_remove_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006909{
6910 char b[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006911 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006912
Yufen Yuc42a0e22018-05-04 18:08:10 +08006913 if (!mddev->pers)
6914 return -ENODEV;
6915
Linus Torvalds1da177e2005-04-16 15:20:36 -07006916 rdev = find_rdev(mddev, dev);
6917 if (!rdev)
6918 return -ENXIO;
6919
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006920 if (rdev->raid_disk < 0)
6921 goto kick_rdev;
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006922
NeilBrown3ea8929d2013-04-24 11:42:41 +10006923 clear_bit(Blocked, &rdev->flags);
6924 remove_and_add_spares(mddev, rdev);
6925
Linus Torvalds1da177e2005-04-16 15:20:36 -07006926 if (rdev->raid_disk >= 0)
6927 goto busy;
6928
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006929kick_rdev:
Zhao Hemingbca5b062020-11-19 19:41:34 +08006930 if (mddev_is_clustered(mddev)) {
6931 if (md_cluster_ops->remove_disk(mddev, rdev))
6932 goto busy;
6933 }
Goldwyn Rodrigues88bcfef2015-04-14 10:44:44 -05006934
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05006935 md_kick_rdev_from_array(rdev);
Shaohua Li29530792016-12-08 15:48:19 -08006936 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11006937 if (mddev->thread)
6938 md_wakeup_thread(mddev->thread);
6939 else
6940 md_update_sb(mddev, 1);
NeilBrownd7603b72006-01-06 00:20:30 -08006941 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006942
6943 return 0;
6944busy:
NeilBrown9d487392016-11-02 14:16:49 +11006945 pr_debug("md: cannot remove active disk %s from %s ...\n",
6946 bdevname(rdev->bdev,b), mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006947 return -EBUSY;
6948}
6949
NeilBrownf72ffdd2014-09-30 14:23:59 +10006950static int hot_add_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006951{
6952 char b[BDEVNAME_SIZE];
6953 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11006954 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006955
6956 if (!mddev->pers)
6957 return -ENODEV;
6958
6959 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006960 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006961 mdname(mddev));
6962 return -EINVAL;
6963 }
6964 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006965 pr_warn("%s: personality does not support diskops!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006966 mdname(mddev));
6967 return -EINVAL;
6968 }
6969
NeilBrownd710e132008-10-13 11:55:12 +11006970 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006971 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006972 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006973 PTR_ERR(rdev));
6974 return -EINVAL;
6975 }
6976
6977 if (mddev->persistent)
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006978 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006979 else
Mike Snitzer77304d22010-11-08 14:39:12 +01006980 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006981
NeilBrown8190e752009-06-18 08:48:58 +10006982 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006983
NeilBrownb2d444d2005-11-08 21:39:31 -08006984 if (test_bit(Faulty, &rdev->flags)) {
NeilBrown9d487392016-11-02 14:16:49 +11006985 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006986 bdevname(rdev->bdev,b), mdname(mddev));
6987 err = -EINVAL;
6988 goto abort_export;
6989 }
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006990
NeilBrownb2d444d2005-11-08 21:39:31 -08006991 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006992 rdev->desc_nr = -1;
NeilBrown58427302006-10-06 00:44:04 -07006993 rdev->saved_raid_disk = -1;
NeilBrown2bf071b2006-01-06 00:20:55 -08006994 err = bind_rdev_to_array(rdev, mddev);
6995 if (err)
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05006996 goto abort_export;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006997
6998 /*
6999 * The rest should better be atomic, we can have disk failures
7000 * noticed in interrupt contexts ...
7001 */
7002
Linus Torvalds1da177e2005-04-16 15:20:36 -07007003 rdev->raid_disk = -1;
7004
Shaohua Li29530792016-12-08 15:48:19 -08007005 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11007006 if (!mddev->thread)
7007 md_update_sb(mddev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007008 /*
7009 * Kick recovery, maybe this spare has to be added to the
7010 * array immediately.
7011 */
7012 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7013 md_wakeup_thread(mddev->thread);
NeilBrownd7603b72006-01-06 00:20:30 -08007014 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007015 return 0;
7016
Linus Torvalds1da177e2005-04-16 15:20:36 -07007017abort_export:
7018 export_rdev(rdev);
7019 return err;
7020}
7021
NeilBrownfd01b882011-10-11 16:47:53 +11007022static int set_bitmap_file(struct mddev *mddev, int fd)
NeilBrown32a76272005-06-21 17:17:14 -07007023{
NeilBrown035328c2014-04-09 12:25:40 +10007024 int err = 0;
NeilBrown32a76272005-06-21 17:17:14 -07007025
NeilBrown36fa3062005-09-09 16:23:45 -07007026 if (mddev->pers) {
NeilBrownd66b1b32014-08-08 15:40:24 +10007027 if (!mddev->pers->quiesce || !mddev->thread)
NeilBrown36fa3062005-09-09 16:23:45 -07007028 return -EBUSY;
7029 if (mddev->recovery || mddev->sync_thread)
7030 return -EBUSY;
7031 /* we should be able to change the bitmap.. */
NeilBrown32a76272005-06-21 17:17:14 -07007032 }
7033
NeilBrown36fa3062005-09-09 16:23:45 -07007034 if (fd >= 0) {
NeilBrown035328c2014-04-09 12:25:40 +10007035 struct inode *inode;
NeilBrown1e594bb2014-12-15 12:57:00 +11007036 struct file *f;
NeilBrown36fa3062005-09-09 16:23:45 -07007037
NeilBrown1e594bb2014-12-15 12:57:00 +11007038 if (mddev->bitmap || mddev->bitmap_info.file)
7039 return -EEXIST; /* cannot add when bitmap is present */
7040 f = fget(fd);
7041
7042 if (f == NULL) {
NeilBrown9d487392016-11-02 14:16:49 +11007043 pr_warn("%s: error: failed to get bitmap file\n",
7044 mdname(mddev));
NeilBrown36fa3062005-09-09 16:23:45 -07007045 return -EBADF;
7046 }
7047
NeilBrown1e594bb2014-12-15 12:57:00 +11007048 inode = f->f_mapping->host;
NeilBrown035328c2014-04-09 12:25:40 +10007049 if (!S_ISREG(inode->i_mode)) {
NeilBrown9d487392016-11-02 14:16:49 +11007050 pr_warn("%s: error: bitmap file must be a regular file\n",
7051 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007052 err = -EBADF;
NeilBrown1e594bb2014-12-15 12:57:00 +11007053 } else if (!(f->f_mode & FMODE_WRITE)) {
NeilBrown9d487392016-11-02 14:16:49 +11007054 pr_warn("%s: error: bitmap file must open for write\n",
7055 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007056 err = -EBADF;
7057 } else if (atomic_read(&inode->i_writecount) != 1) {
NeilBrown9d487392016-11-02 14:16:49 +11007058 pr_warn("%s: error: bitmap file is already in use\n",
7059 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007060 err = -EBUSY;
7061 }
7062 if (err) {
NeilBrown1e594bb2014-12-15 12:57:00 +11007063 fput(f);
NeilBrown36fa3062005-09-09 16:23:45 -07007064 return err;
7065 }
NeilBrown1e594bb2014-12-15 12:57:00 +11007066 mddev->bitmap_info.file = f;
NeilBrownc3d97142009-12-14 12:49:52 +11007067 mddev->bitmap_info.offset = 0; /* file overrides offset */
NeilBrown36fa3062005-09-09 16:23:45 -07007068 } else if (mddev->bitmap == NULL)
7069 return -ENOENT; /* cannot remove what isn't there */
7070 err = 0;
7071 if (mddev->pers) {
NeilBrown69e51b42010-06-01 19:37:35 +10007072 if (fd >= 0) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007073 struct bitmap *bitmap;
7074
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007075 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007076 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007077 if (!IS_ERR(bitmap)) {
7078 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007079 err = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007080 } else
7081 err = PTR_ERR(bitmap);
NeilBrown52a0d492017-10-17 13:46:43 +11007082 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007083 md_bitmap_destroy(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007084 fd = -1;
7085 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007086 mddev_resume(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007087 } else if (fd < 0) {
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007088 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007089 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007090 mddev_resume(mddev);
NeilBrownd7375ab2006-06-26 00:27:43 -07007091 }
NeilBrownd7375ab2006-06-26 00:27:43 -07007092 }
7093 if (fd < 0) {
NeilBrown4af1a042014-12-15 12:57:00 +11007094 struct file *f = mddev->bitmap_info.file;
7095 if (f) {
7096 spin_lock(&mddev->lock);
7097 mddev->bitmap_info.file = NULL;
7098 spin_unlock(&mddev->lock);
7099 fput(f);
7100 }
NeilBrown36fa3062005-09-09 16:23:45 -07007101 }
7102
NeilBrown32a76272005-06-21 17:17:14 -07007103 return err;
7104}
7105
Linus Torvalds1da177e2005-04-16 15:20:36 -07007106/*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007107 * md_set_array_info is used two different ways
Linus Torvalds1da177e2005-04-16 15:20:36 -07007108 * The original usage is when creating a new array.
7109 * In this usage, raid_disks is > 0 and it together with
7110 * level, size, not_persistent,layout,chunksize determine the
7111 * shape of the array.
7112 * This will always create an array with a type-0.90.0 superblock.
7113 * The newer usage is when assembling an array.
7114 * In this case raid_disks will be 0, and the major_version field is
7115 * use to determine which style super-blocks are to be found on the devices.
7116 * The minor and patch _version numbers are also kept incase the
7117 * super_block handler wishes to interpret them.
7118 */
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007119int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007120{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007121 if (info->raid_disks == 0) {
7122 /* just setting version number for superblock loading */
7123 if (info->major_version < 0 ||
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07007124 info->major_version >= ARRAY_SIZE(super_types) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07007125 super_types[info->major_version].name == NULL) {
7126 /* maybe try to auto-load a module? */
NeilBrown9d487392016-11-02 14:16:49 +11007127 pr_warn("md: superblock version %d not known\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007128 info->major_version);
7129 return -EINVAL;
7130 }
7131 mddev->major_version = info->major_version;
7132 mddev->minor_version = info->minor_version;
7133 mddev->patch_version = info->patch_version;
NeilBrown3f9d7b02006-12-22 01:11:41 -08007134 mddev->persistent = !info->not_persistent;
NeilBrowncbd19982009-12-30 12:08:49 +11007135 /* ensure mddev_put doesn't delete this now that there
7136 * is some minimal configuration.
7137 */
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007138 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007139 return 0;
7140 }
7141 mddev->major_version = MD_MAJOR_VERSION;
7142 mddev->minor_version = MD_MINOR_VERSION;
7143 mddev->patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007144 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007145
7146 mddev->level = info->level;
NeilBrown17115e02006-01-16 22:14:57 -08007147 mddev->clevel[0] = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11007148 mddev->dev_sectors = 2 * (sector_t)info->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149 mddev->raid_disks = info->raid_disks;
7150 /* don't set md_minor, it is determined by which /dev/md* was
7151 * openned
7152 */
7153 if (info->state & (1<<MD_SB_CLEAN))
7154 mddev->recovery_cp = MaxSector;
7155 else
7156 mddev->recovery_cp = 0;
7157 mddev->persistent = ! info->not_persistent;
NeilBrowne6910632008-02-06 01:39:51 -08007158 mddev->external = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007159
7160 mddev->layout = info->layout;
NeilBrown33f2c352019-09-09 16:52:29 +10007161 if (mddev->level == 0)
7162 /* Cannot trust RAID0 layout info here */
7163 mddev->layout = -1;
Andre Noll9d8f0362009-06-18 08:45:01 +10007164 mddev->chunk_sectors = info->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007165
Shaohua Li29530792016-12-08 15:48:19 -08007166 if (mddev->persistent) {
NeilBrown1b3bae42017-03-01 07:31:28 +11007167 mddev->max_disks = MD_SB_DISKS;
7168 mddev->flags = 0;
7169 mddev->sb_flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08007170 }
7171 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007172
NeilBrownc3d97142009-12-14 12:49:52 +11007173 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10007174 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrownc3d97142009-12-14 12:49:52 +11007175 mddev->bitmap_info.offset = 0;
NeilBrownb2a27032005-11-28 13:44:12 -08007176
NeilBrownf6705572006-03-27 01:18:11 -08007177 mddev->reshape_position = MaxSector;
7178
Linus Torvalds1da177e2005-04-16 15:20:36 -07007179 /*
7180 * Generate a 128 bit UUID
7181 */
7182 get_random_bytes(mddev->uuid, 16);
7183
NeilBrownf6705572006-03-27 01:18:11 -08007184 mddev->new_level = mddev->level;
Andre Noll664e7c42009-06-18 08:45:27 +10007185 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08007186 mddev->new_layout = mddev->layout;
7187 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007188 mddev->reshape_backwards = 0;
NeilBrownf6705572006-03-27 01:18:11 -08007189
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190 return 0;
7191}
7192
NeilBrownfd01b882011-10-11 16:47:53 +11007193void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
Dan Williams1f403622009-03-31 14:59:03 +11007194{
Shaohua Liefa4b772017-10-18 22:08:13 -07007195 lockdep_assert_held(&mddev->reconfig_mutex);
Dan Williamsb522adc2009-03-31 15:00:31 +11007196
7197 if (mddev->external_size)
7198 return;
7199
Dan Williams1f403622009-03-31 14:59:03 +11007200 mddev->array_sectors = array_sectors;
7201}
7202EXPORT_SYMBOL(md_set_array_sectors);
7203
NeilBrownfd01b882011-10-11 16:47:53 +11007204static int update_size(struct mddev *mddev, sector_t num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007205{
NeilBrown3cb03002011-10-11 16:45:26 +11007206 struct md_rdev *rdev;
NeilBrowna35b0d62006-01-06 00:20:49 -08007207 int rv;
Andre Nolld71f9f82008-07-11 22:02:22 +10007208 int fit = (num_sectors == 0);
Guoqing Jiang818da592017-03-01 16:42:40 +08007209 sector_t old_dev_sectors = mddev->dev_sectors;
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04007210
NeilBrowna35b0d62006-01-06 00:20:49 -08007211 if (mddev->pers->resize == NULL)
7212 return -EINVAL;
Andre Nolld71f9f82008-07-11 22:02:22 +10007213 /* The "num_sectors" is the number of sectors of each device that
7214 * is used. This can only make sense for arrays with redundancy.
7215 * linear and raid0 always use whatever space is available. We can only
7216 * consider changing this number if no resync or reconstruction is
7217 * happening, and if the new size is acceptable. It must fit before the
Andre Noll0f420352008-07-11 22:02:23 +10007218 * sb_start or, if that is <data_offset, it must fit before the size
Andre Nolld71f9f82008-07-11 22:02:22 +10007219 * of each device. If num_sectors is zero, we find the largest size
7220 * that fits.
NeilBrowna35b0d62006-01-06 00:20:49 -08007221 */
NeilBrownf851b602014-12-11 10:02:10 +11007222 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7223 mddev->sync_thread)
NeilBrowna35b0d62006-01-06 00:20:49 -08007224 return -EBUSY;
NeilBrownbd8839e2014-05-28 13:39:21 +10007225 if (mddev->ro)
7226 return -EROFS;
NeilBrowna4a61252012-05-22 13:55:27 +10007227
NeilBrowndafb20f2012-03-19 12:46:39 +11007228 rdev_for_each(rdev, mddev) {
Andre Nolldd8ac332009-03-31 14:33:13 +11007229 sector_t avail = rdev->sectors;
NeilBrown01ab5662006-10-28 10:38:30 -07007230
Andre Nolld71f9f82008-07-11 22:02:22 +10007231 if (fit && (num_sectors == 0 || num_sectors > avail))
7232 num_sectors = avail;
7233 if (avail < num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007234 return -ENOSPC;
7235 }
Andre Nolld71f9f82008-07-11 22:02:22 +10007236 rv = mddev->pers->resize(mddev, num_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007237 if (!rv) {
Guoqing Jiang818da592017-03-01 16:42:40 +08007238 if (mddev_is_clustered(mddev))
7239 md_cluster_ops->update_size(mddev, old_dev_sectors);
7240 else if (mddev->queue) {
Christoph Hellwig2c247c52020-11-16 15:57:11 +01007241 set_capacity_and_notify(mddev->gendisk,
7242 mddev->array_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007243 }
7244 }
NeilBrowna35b0d62006-01-06 00:20:49 -08007245 return rv;
7246}
7247
NeilBrownfd01b882011-10-11 16:47:53 +11007248static int update_raid_disks(struct mddev *mddev, int raid_disks)
NeilBrownda943b992006-01-06 00:20:54 -08007249{
7250 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10007251 struct md_rdev *rdev;
NeilBrownda943b992006-01-06 00:20:54 -08007252 /* change the number of raid disks */
NeilBrown63c70c42006-03-27 01:18:13 -08007253 if (mddev->pers->check_reshape == NULL)
NeilBrownda943b992006-01-06 00:20:54 -08007254 return -EINVAL;
NeilBrownbd8839e2014-05-28 13:39:21 +10007255 if (mddev->ro)
7256 return -EROFS;
NeilBrownda943b992006-01-06 00:20:54 -08007257 if (raid_disks <= 0 ||
NeilBrown233fca32010-04-14 17:02:09 +10007258 (mddev->max_disks && raid_disks >= mddev->max_disks))
NeilBrownda943b992006-01-06 00:20:54 -08007259 return -EINVAL;
NeilBrownf851b602014-12-11 10:02:10 +11007260 if (mddev->sync_thread ||
7261 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
Zhao Heminga8da01f2020-11-19 19:41:33 +08007262 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
NeilBrownf851b602014-12-11 10:02:10 +11007263 mddev->reshape_position != MaxSector)
NeilBrownda943b992006-01-06 00:20:54 -08007264 return -EBUSY;
NeilBrownc6563a82012-05-21 09:27:00 +10007265
7266 rdev_for_each(rdev, mddev) {
7267 if (mddev->raid_disks < raid_disks &&
7268 rdev->data_offset < rdev->new_data_offset)
7269 return -EINVAL;
7270 if (mddev->raid_disks > raid_disks &&
7271 rdev->data_offset > rdev->new_data_offset)
7272 return -EINVAL;
7273 }
7274
NeilBrown63c70c42006-03-27 01:18:13 -08007275 mddev->delta_disks = raid_disks - mddev->raid_disks;
NeilBrown2c810cd2012-05-21 09:27:00 +10007276 if (mddev->delta_disks < 0)
7277 mddev->reshape_backwards = 1;
7278 else if (mddev->delta_disks > 0)
7279 mddev->reshape_backwards = 0;
NeilBrown63c70c42006-03-27 01:18:13 -08007280
7281 rv = mddev->pers->check_reshape(mddev);
NeilBrown2c810cd2012-05-21 09:27:00 +10007282 if (rv < 0) {
NeilBrownde171cb2011-01-31 11:57:42 +11007283 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007284 mddev->reshape_backwards = 0;
7285 }
NeilBrownda943b992006-01-06 00:20:54 -08007286 return rv;
7287}
7288
Linus Torvalds1da177e2005-04-16 15:20:36 -07007289/*
7290 * update_array_info is used to change the configuration of an
7291 * on-line array.
7292 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
7293 * fields in the info are checked against the array.
7294 * Any differences that cannot be handled will cause an error.
7295 * Normally, only one change can be managed at a time.
7296 */
NeilBrownfd01b882011-10-11 16:47:53 +11007297static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298{
7299 int rv = 0;
7300 int cnt = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007301 int state = 0;
7302
7303 /* calculate expected state,ignoring low bits */
NeilBrownc3d97142009-12-14 12:49:52 +11007304 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown36fa3062005-09-09 16:23:45 -07007305 state |= (1 << MD_SB_BITMAP_PRESENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007306
7307 if (mddev->major_version != info->major_version ||
7308 mddev->minor_version != info->minor_version ||
7309/* mddev->patch_version != info->patch_version || */
7310 mddev->ctime != info->ctime ||
7311 mddev->level != info->level ||
7312/* mddev->layout != info->layout || */
Firo Yang4e023612015-06-11 09:41:10 +08007313 mddev->persistent != !info->not_persistent ||
Andre Noll9d8f0362009-06-18 08:45:01 +10007314 mddev->chunk_sectors != info->chunk_size >> 9 ||
NeilBrown36fa3062005-09-09 16:23:45 -07007315 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7316 ((state^info->state) & 0xfffffe00)
7317 )
Linus Torvalds1da177e2005-04-16 15:20:36 -07007318 return -EINVAL;
7319 /* Check there is only one change */
Andre Noll58c0fed2009-03-31 14:33:13 +11007320 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7321 cnt++;
7322 if (mddev->raid_disks != info->raid_disks)
7323 cnt++;
7324 if (mddev->layout != info->layout)
7325 cnt++;
7326 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7327 cnt++;
7328 if (cnt == 0)
7329 return 0;
7330 if (cnt > 1)
7331 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007332
7333 if (mddev->layout != info->layout) {
7334 /* Change layout
7335 * we don't need to do anything at the md level, the
7336 * personality will take care of it all.
7337 */
NeilBrown50ac1682009-06-18 08:47:55 +10007338 if (mddev->pers->check_reshape == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007339 return -EINVAL;
NeilBrown597a7112009-06-18 08:47:42 +10007340 else {
7341 mddev->new_layout = info->layout;
NeilBrown50ac1682009-06-18 08:47:55 +10007342 rv = mddev->pers->check_reshape(mddev);
NeilBrown597a7112009-06-18 08:47:42 +10007343 if (rv)
7344 mddev->new_layout = mddev->layout;
7345 return rv;
7346 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007347 }
Andre Noll58c0fed2009-03-31 14:33:13 +11007348 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
Andre Nolld71f9f82008-07-11 22:02:22 +10007349 rv = update_size(mddev, (sector_t)info->size * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007350
NeilBrownda943b992006-01-06 00:20:54 -08007351 if (mddev->raid_disks != info->raid_disks)
7352 rv = update_raid_disks(mddev, info->raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007353
NeilBrown36fa3062005-09-09 16:23:45 -07007354 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007355 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7356 rv = -EINVAL;
7357 goto err;
7358 }
7359 if (mddev->recovery || mddev->sync_thread) {
7360 rv = -EBUSY;
7361 goto err;
7362 }
NeilBrown36fa3062005-09-09 16:23:45 -07007363 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007364 struct bitmap *bitmap;
NeilBrown36fa3062005-09-09 16:23:45 -07007365 /* add the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007366 if (mddev->bitmap) {
7367 rv = -EEXIST;
7368 goto err;
7369 }
7370 if (mddev->bitmap_info.default_offset == 0) {
7371 rv = -EINVAL;
7372 goto err;
7373 }
NeilBrownc3d97142009-12-14 12:49:52 +11007374 mddev->bitmap_info.offset =
7375 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10007376 mddev->bitmap_info.space =
7377 mddev->bitmap_info.default_space;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007378 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007379 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007380 if (!IS_ERR(bitmap)) {
7381 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007382 rv = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007383 } else
7384 rv = PTR_ERR(bitmap);
NeilBrown36fa3062005-09-09 16:23:45 -07007385 if (rv)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007386 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007387 mddev_resume(mddev);
NeilBrown36fa3062005-09-09 16:23:45 -07007388 } else {
7389 /* remove the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007390 if (!mddev->bitmap) {
7391 rv = -ENOENT;
7392 goto err;
7393 }
7394 if (mddev->bitmap->storage.file) {
7395 rv = -EINVAL;
7396 goto err;
7397 }
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007398 if (mddev->bitmap_info.nodes) {
7399 /* hold PW on all the bitmap lock */
7400 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11007401 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007402 rv = -EPERM;
7403 md_cluster_ops->unlock_all_bitmaps(mddev);
7404 goto err;
7405 }
7406
7407 mddev->bitmap_info.nodes = 0;
7408 md_cluster_ops->leave(mddev);
Zhao Hemingedee9df2020-07-21 02:08:53 +08007409 module_put(md_cluster_mod);
Zhao Heming7c9d5c52020-07-21 02:08:52 +08007410 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007411 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007412 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007413 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007414 mddev_resume(mddev);
NeilBrownc3d97142009-12-14 12:49:52 +11007415 mddev->bitmap_info.offset = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007416 }
7417 }
NeilBrown850b2b422006-10-03 01:15:46 -07007418 md_update_sb(mddev, 1);
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007419 return rv;
7420err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007421 return rv;
7422}
7423
NeilBrownfd01b882011-10-11 16:47:53 +11007424static int set_disk_faulty(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007425{
NeilBrown3cb03002011-10-11 16:45:26 +11007426 struct md_rdev *rdev;
NeilBrown1ca69c42012-10-11 13:37:33 +11007427 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007428
7429 if (mddev->pers == NULL)
7430 return -ENODEV;
7431
NeilBrown1ca69c42012-10-11 13:37:33 +11007432 rcu_read_lock();
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +01007433 rdev = md_find_rdev_rcu(mddev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007434 if (!rdev)
NeilBrown1ca69c42012-10-11 13:37:33 +11007435 err = -ENODEV;
7436 else {
7437 md_error(mddev, rdev);
7438 if (!test_bit(Faulty, &rdev->flags))
7439 err = -EBUSY;
7440 }
7441 rcu_read_unlock();
7442 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007443}
7444
Andre Noll2f9618c2008-04-25 18:57:58 +02007445/*
7446 * We have a problem here : there is no easy way to give a CHS
7447 * virtual geometry. We currently pretend that we have a 2 heads
7448 * 4 sectors (with a BIG number of cylinders...). This drives
7449 * dosfs just mad... ;-)
7450 */
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007451static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7452{
NeilBrownfd01b882011-10-11 16:47:53 +11007453 struct mddev *mddev = bdev->bd_disk->private_data;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007454
7455 geo->heads = 2;
7456 geo->sectors = 4;
NeilBrown49ce6ce2010-03-29 10:51:42 +11007457 geo->cylinders = mddev->array_sectors / 8;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007458 return 0;
7459}
7460
Nicolas Schichancb335f82014-01-15 16:58:52 +01007461static inline bool md_ioctl_valid(unsigned int cmd)
7462{
7463 switch (cmd) {
7464 case ADD_NEW_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007465 case GET_ARRAY_INFO:
7466 case GET_BITMAP_FILE:
7467 case GET_DISK_INFO:
7468 case HOT_ADD_DISK:
7469 case HOT_REMOVE_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007470 case RAID_VERSION:
7471 case RESTART_ARRAY_RW:
7472 case RUN_ARRAY:
7473 case SET_ARRAY_INFO:
7474 case SET_BITMAP_FILE:
7475 case SET_DISK_FAULTY:
7476 case STOP_ARRAY:
7477 case STOP_ARRAY_RO:
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007478 case CLUSTERED_DISK_NACK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007479 return true;
7480 default:
7481 return false;
7482 }
7483}
7484
Al Viroa39907f2008-03-02 10:31:15 -05007485static int md_ioctl(struct block_device *bdev, fmode_t mode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007486 unsigned int cmd, unsigned long arg)
7487{
7488 int err = 0;
7489 void __user *argp = (void __user *)arg;
NeilBrownfd01b882011-10-11 16:47:53 +11007490 struct mddev *mddev = NULL;
NeilBrown065e5192017-04-06 11:16:33 +08007491 bool did_set_md_closing = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007492
Nicolas Schichancb335f82014-01-15 16:58:52 +01007493 if (!md_ioctl_valid(cmd))
7494 return -ENOTTY;
7495
NeilBrown506c9e42011-12-23 10:17:26 +11007496 switch (cmd) {
7497 case RAID_VERSION:
7498 case GET_ARRAY_INFO:
7499 case GET_DISK_INFO:
7500 break;
7501 default:
7502 if (!capable(CAP_SYS_ADMIN))
7503 return -EACCES;
7504 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007505
7506 /*
7507 * Commands dealing with the RAID driver but not any
7508 * particular array:
7509 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007510 switch (cmd) {
7511 case RAID_VERSION:
7512 err = get_version(argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007513 goto out;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007514 default:;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007515 }
7516
7517 /*
7518 * Commands creating/starting a new array:
7519 */
7520
Al Viroa39907f2008-03-02 10:31:15 -05007521 mddev = bdev->bd_disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007522
7523 if (!mddev) {
7524 BUG();
NeilBrown3adc28d2014-09-30 15:46:41 +10007525 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007526 }
7527
NeilBrown1ca69c42012-10-11 13:37:33 +11007528 /* Some actions do not requires the mutex */
7529 switch (cmd) {
7530 case GET_ARRAY_INFO:
7531 if (!mddev->raid_disks && !mddev->external)
7532 err = -ENODEV;
7533 else
7534 err = get_array_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007535 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007536
7537 case GET_DISK_INFO:
7538 if (!mddev->raid_disks && !mddev->external)
7539 err = -ENODEV;
7540 else
7541 err = get_disk_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007542 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007543
7544 case SET_DISK_FAULTY:
7545 err = set_disk_faulty(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007546 goto out;
NeilBrown4af1a042014-12-15 12:57:00 +11007547
7548 case GET_BITMAP_FILE:
7549 err = get_bitmap_file(mddev, argp);
7550 goto out;
7551
NeilBrown1ca69c42012-10-11 13:37:33 +11007552 }
7553
Guoqing Jiang78b990c2020-04-04 23:57:10 +02007554 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02007555 flush_rdev_wq(mddev);
NeilBrowna7a3f082012-12-11 13:35:54 +11007556
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007557 if (cmd == HOT_REMOVE_DISK)
7558 /* need to ensure recovery thread has run */
7559 wait_event_interruptible_timeout(mddev->sb_wait,
7560 !test_bit(MD_RECOVERY_NEEDED,
Shaohua Li82a301c2016-12-08 15:48:18 -08007561 &mddev->recovery),
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007562 msecs_to_jiffies(5000));
NeilBrown260fa032013-08-27 16:44:13 +10007563 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7564 /* Need to flush page cache, and ensure no-one else opens
7565 * and writes
7566 */
7567 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10007568 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
NeilBrown260fa032013-08-27 16:44:13 +10007569 mutex_unlock(&mddev->open_mutex);
7570 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007571 goto out;
NeilBrown260fa032013-08-27 16:44:13 +10007572 }
Dae R. Jeongc731b842020-10-22 10:21:28 +09007573 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7574 mutex_unlock(&mddev->open_mutex);
7575 err = -EBUSY;
7576 goto out;
7577 }
NeilBrown065e5192017-04-06 11:16:33 +08007578 did_set_md_closing = true;
NeilBrown260fa032013-08-27 16:44:13 +10007579 mutex_unlock(&mddev->open_mutex);
7580 sync_blockdev(bdev);
7581 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007582 err = mddev_lock(mddev);
7583 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007584 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7585 err, cmd);
NeilBrown3adc28d2014-09-30 15:46:41 +10007586 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007587 }
7588
NeilBrownc02c0ae2012-12-11 13:39:21 +11007589 if (cmd == SET_ARRAY_INFO) {
7590 mdu_array_info_t info;
7591 if (!arg)
7592 memset(&info, 0, sizeof(info));
7593 else if (copy_from_user(&info, argp, sizeof(info))) {
7594 err = -EFAULT;
NeilBrown3adc28d2014-09-30 15:46:41 +10007595 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007596 }
7597 if (mddev->pers) {
7598 err = update_array_info(mddev, &info);
7599 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007600 pr_warn("md: couldn't update array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007601 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007602 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007603 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007604 }
7605 if (!list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11007606 pr_warn("md: array %s already has disks!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007607 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007608 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007609 }
7610 if (mddev->raid_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11007611 pr_warn("md: array %s already initialised!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007612 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007613 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007614 }
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007615 err = md_set_array_info(mddev, &info);
NeilBrownc02c0ae2012-12-11 13:39:21 +11007616 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007617 pr_warn("md: couldn't set array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007618 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007619 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007620 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007621 }
7622
7623 /*
7624 * Commands querying/configuring an existing array:
7625 */
NeilBrown32a76272005-06-21 17:17:14 -07007626 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
NeilBrown3f9d7b02006-12-22 01:11:41 -08007627 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
NeilBrowna17184a2008-02-06 01:39:55 -08007628 if ((!mddev->raid_disks && !mddev->external)
7629 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7630 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7631 && cmd != GET_BITMAP_FILE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007632 err = -ENODEV;
NeilBrown3adc28d2014-09-30 15:46:41 +10007633 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007634 }
7635
7636 /*
7637 * Commands even a read-only array can execute:
7638 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007639 switch (cmd) {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007640 case RESTART_ARRAY_RW:
7641 err = restart_array(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007642 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007643
7644 case STOP_ARRAY:
7645 err = do_md_stop(mddev, 0, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007646 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007647
7648 case STOP_ARRAY_RO:
7649 err = md_set_readonly(mddev, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007650 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007651
NeilBrown3ea8929d2013-04-24 11:42:41 +10007652 case HOT_REMOVE_DISK:
7653 err = hot_remove_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007654 goto unlock;
NeilBrown3ea8929d2013-04-24 11:42:41 +10007655
NeilBrown7ceb17e2013-04-24 11:42:42 +10007656 case ADD_NEW_DISK:
7657 /* We can support ADD_NEW_DISK on read-only arrays
Wei Fang466ad292016-03-21 19:19:30 +08007658 * only if we are re-adding a preexisting device.
NeilBrown7ceb17e2013-04-24 11:42:42 +10007659 * So require mddev->pers and MD_DISK_SYNC.
7660 */
7661 if (mddev->pers) {
7662 mdu_disk_info_t info;
7663 if (copy_from_user(&info, argp, sizeof(info)))
7664 err = -EFAULT;
7665 else if (!(info.state & (1<<MD_DISK_SYNC)))
7666 /* Need to clear read-only for this */
7667 break;
7668 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007669 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007670 goto unlock;
NeilBrown7ceb17e2013-04-24 11:42:42 +10007671 }
7672 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007673 }
7674
7675 /*
7676 * The remaining ioctls are changing the state of the
NeilBrownf91de922005-11-08 21:39:36 -08007677 * superblock, so we do not allow them on read-only arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007678 */
NeilBrown326eb172014-09-30 15:36:28 +10007679 if (mddev->ro && mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08007680 if (mddev->ro == 2) {
7681 mddev->ro = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10007682 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10007683 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf3378b42013-02-28 11:59:03 +11007684 /* mddev_unlock will wake thread */
7685 /* If a device failed while we were read-only, we
7686 * need to make sure the metadata is updated now.
7687 */
Shaohua Li29530792016-12-08 15:48:19 -08007688 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
NeilBrownf3378b42013-02-28 11:59:03 +11007689 mddev_unlock(mddev);
7690 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08007691 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7692 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown29f097c2013-11-14 17:54:51 +11007693 mddev_lock_nointr(mddev);
NeilBrownf3378b42013-02-28 11:59:03 +11007694 }
NeilBrownf91de922005-11-08 21:39:36 -08007695 } else {
7696 err = -EROFS;
NeilBrown3adc28d2014-09-30 15:46:41 +10007697 goto unlock;
NeilBrownf91de922005-11-08 21:39:36 -08007698 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007699 }
7700
NeilBrownc02c0ae2012-12-11 13:39:21 +11007701 switch (cmd) {
7702 case ADD_NEW_DISK:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007703 {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007704 mdu_disk_info_t info;
7705 if (copy_from_user(&info, argp, sizeof(info)))
7706 err = -EFAULT;
7707 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007708 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007709 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007710 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007711
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007712 case CLUSTERED_DISK_NACK:
7713 if (mddev_is_clustered(mddev))
7714 md_cluster_ops->new_disk_ack(mddev, false);
7715 else
7716 err = -EINVAL;
7717 goto unlock;
7718
NeilBrownc02c0ae2012-12-11 13:39:21 +11007719 case HOT_ADD_DISK:
7720 err = hot_add_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007721 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007722
NeilBrownc02c0ae2012-12-11 13:39:21 +11007723 case RUN_ARRAY:
7724 err = do_md_run(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007725 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007726
NeilBrownc02c0ae2012-12-11 13:39:21 +11007727 case SET_BITMAP_FILE:
7728 err = set_bitmap_file(mddev, (int)arg);
NeilBrown3adc28d2014-09-30 15:46:41 +10007729 goto unlock;
NeilBrown32a76272005-06-21 17:17:14 -07007730
NeilBrownc02c0ae2012-12-11 13:39:21 +11007731 default:
7732 err = -EINVAL;
NeilBrown3adc28d2014-09-30 15:46:41 +10007733 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007734 }
7735
NeilBrown3adc28d2014-09-30 15:46:41 +10007736unlock:
NeilBrownd3374822009-01-09 08:31:10 +11007737 if (mddev->hold_active == UNTIL_IOCTL &&
7738 err != -EINVAL)
7739 mddev->hold_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007740 mddev_unlock(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007741out:
NeilBrown065e5192017-04-06 11:16:33 +08007742 if(did_set_md_closing)
7743 clear_bit(MD_CLOSING, &mddev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007744 return err;
7745}
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007746#ifdef CONFIG_COMPAT
7747static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7748 unsigned int cmd, unsigned long arg)
7749{
7750 switch (cmd) {
7751 case HOT_REMOVE_DISK:
7752 case HOT_ADD_DISK:
7753 case SET_DISK_FAULTY:
7754 case SET_BITMAP_FILE:
7755 /* These take in integer arg, do not convert */
7756 break;
7757 default:
7758 arg = (unsigned long)compat_ptr(arg);
7759 break;
7760 }
7761
7762 return md_ioctl(bdev, mode, cmd, arg);
7763}
7764#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007765
Christoph Hellwig118cf082020-11-03 11:00:13 +01007766static int md_set_read_only(struct block_device *bdev, bool ro)
7767{
7768 struct mddev *mddev = bdev->bd_disk->private_data;
7769 int err;
7770
7771 err = mddev_lock(mddev);
7772 if (err)
7773 return err;
7774
7775 if (!mddev->raid_disks && !mddev->external) {
7776 err = -ENODEV;
7777 goto out_unlock;
7778 }
7779
7780 /*
7781 * Transitioning to read-auto need only happen for arrays that call
7782 * md_write_start and which are not ready for writes yet.
7783 */
7784 if (!ro && mddev->ro == 1 && mddev->pers) {
7785 err = restart_array(mddev);
7786 if (err)
7787 goto out_unlock;
7788 mddev->ro = 2;
7789 }
7790
7791out_unlock:
7792 mddev_unlock(mddev);
7793 return err;
7794}
7795
Al Viroa39907f2008-03-02 10:31:15 -05007796static int md_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007797{
7798 /*
7799 * Succeed if we can lock the mddev, which confirms that
7800 * it isn't being stopped right now.
7801 */
NeilBrownfd01b882011-10-11 16:47:53 +11007802 struct mddev *mddev = mddev_find(bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007803 int err;
7804
Yuanhan Liu0c098222012-05-22 13:55:32 +10007805 if (!mddev)
7806 return -ENODEV;
7807
NeilBrownd3374822009-01-09 08:31:10 +11007808 if (mddev->gendisk != bdev->bd_disk) {
7809 /* we are racing with mddev_put which is discarding this
7810 * bd_disk.
7811 */
7812 mddev_put(mddev);
7813 /* Wait until bdev->bd_disk is definitely gone */
Guoqing Jiangf6766ff2020-04-04 23:57:09 +02007814 if (work_pending(&mddev->del_work))
7815 flush_workqueue(md_misc_wq);
Zhao Heming6a4db2a2021-04-03 11:01:25 +08007816 return -EBUSY;
NeilBrownd3374822009-01-09 08:31:10 +11007817 }
7818 BUG_ON(mddev != bdev->bd_disk->private_data);
7819
NeilBrownc8c00a62009-08-10 12:50:52 +10007820 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007821 goto out;
7822
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007823 if (test_bit(MD_CLOSING, &mddev->flags)) {
7824 mutex_unlock(&mddev->open_mutex);
NeilBrowne2342ca2016-12-05 16:40:50 +11007825 err = -ENODEV;
7826 goto out;
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007827 }
7828
Linus Torvalds1da177e2005-04-16 15:20:36 -07007829 err = 0;
NeilBrownf2ea68c2008-07-21 17:05:25 +10007830 atomic_inc(&mddev->openers);
NeilBrownc8c00a62009-08-10 12:50:52 +10007831 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007832
Christoph Hellwig818077d2020-09-08 16:53:43 +02007833 bdev_check_media_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007834 out:
NeilBrowne2342ca2016-12-05 16:40:50 +11007835 if (err)
7836 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007837 return err;
7838}
7839
Al Virodb2a1442013-05-05 21:52:57 -04007840static void md_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007841{
NeilBrownf72ffdd2014-09-30 14:23:59 +10007842 struct mddev *mddev = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007843
Eric Sesterhenn52e5f9d2006-10-03 23:33:23 +02007844 BUG_ON(!mddev);
NeilBrownf2ea68c2008-07-21 17:05:25 +10007845 atomic_dec(&mddev->openers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007846 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007847}
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007848
Christoph Hellwiga564e232020-07-08 14:25:41 +02007849static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007850{
NeilBrownfd01b882011-10-11 16:47:53 +11007851 struct mddev *mddev = disk->private_data;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007852 unsigned int ret = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007853
Christoph Hellwiga564e232020-07-08 14:25:41 +02007854 if (mddev->changed)
7855 ret = DISK_EVENT_MEDIA_CHANGE;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007856 mddev->changed = 0;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007857 return ret;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007858}
Christoph Hellwiga564e232020-07-08 14:25:41 +02007859
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007860const struct block_device_operations md_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07007861{
7862 .owner = THIS_MODULE,
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02007863 .submit_bio = md_submit_bio,
Al Viroa39907f2008-03-02 10:31:15 -05007864 .open = md_open,
7865 .release = md_release,
NeilBrownb492b852009-05-26 12:57:36 +10007866 .ioctl = md_ioctl,
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007867#ifdef CONFIG_COMPAT
7868 .compat_ioctl = md_compat_ioctl,
7869#endif
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007870 .getgeo = md_getgeo,
Christoph Hellwiga564e232020-07-08 14:25:41 +02007871 .check_events = md_check_events,
Christoph Hellwig118cf082020-11-03 11:00:13 +01007872 .set_read_only = md_set_read_only,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007873};
7874
NeilBrownf72ffdd2014-09-30 14:23:59 +10007875static int md_thread(void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007876{
NeilBrown2b8bf342011-10-11 16:48:23 +11007877 struct md_thread *thread = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007878
Linus Torvalds1da177e2005-04-16 15:20:36 -07007879 /*
7880 * md_thread is a 'system-thread', it's priority should be very
7881 * high. We avoid resource deadlocks individually in each
7882 * raid personality. (RAID5 does preallocation) We also use RR and
7883 * the very same RT priority as kswapd, thus we will never get
7884 * into a priority inversion deadlock.
7885 *
7886 * we definitely have to have equal or higher priority than
7887 * bdflush, otherwise bdflush will deadlock if there are too
7888 * many dirty RAID5 blocks.
7889 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007890
NeilBrown6985c432005-10-19 21:23:47 -07007891 allow_signal(SIGKILL);
NeilBrowna6fb0932005-09-09 16:23:56 -07007892 while (!kthread_should_stop()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007893
NeilBrown93588e22005-11-15 00:09:12 -08007894 /* We need to wait INTERRUPTIBLE so that
7895 * we don't add to the load-average.
7896 * That means we need to be sure no signals are
7897 * pending
7898 */
7899 if (signal_pending(current))
7900 flush_signals(current);
7901
7902 wait_event_interruptible_timeout
7903 (thread->wqueue,
7904 test_bit(THREAD_WAKEUP, &thread->flags)
Shaohua Lice1ccd02016-11-21 10:29:18 -08007905 || kthread_should_stop() || kthread_should_park(),
NeilBrown93588e22005-11-15 00:09:12 -08007906 thread->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007907
NeilBrown6c987912011-01-14 09:13:53 +11007908 clear_bit(THREAD_WAKEUP, &thread->flags);
Shaohua Lice1ccd02016-11-21 10:29:18 -08007909 if (kthread_should_park())
7910 kthread_parkme();
NeilBrown6c987912011-01-14 09:13:53 +11007911 if (!kthread_should_stop())
Shaohua Li4ed87312012-10-11 13:34:00 +11007912 thread->run(thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007913 }
NeilBrowna6fb0932005-09-09 16:23:56 -07007914
Linus Torvalds1da177e2005-04-16 15:20:36 -07007915 return 0;
7916}
7917
NeilBrown2b8bf342011-10-11 16:48:23 +11007918void md_wakeup_thread(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007919{
7920 if (thread) {
NeilBrown36a4e1f2011-10-07 14:23:17 +11007921 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
Guoqing Jiangd1d90142017-10-09 10:32:48 +08007922 set_bit(THREAD_WAKEUP, &thread->flags);
7923 wake_up(&thread->wqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007924 }
7925}
NeilBrown6c144d32014-09-30 16:15:38 +10007926EXPORT_SYMBOL(md_wakeup_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007927
Shaohua Li4ed87312012-10-11 13:34:00 +11007928struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7929 struct mddev *mddev, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007930{
NeilBrown2b8bf342011-10-11 16:48:23 +11007931 struct md_thread *thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007932
NeilBrown2b8bf342011-10-11 16:48:23 +11007933 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007934 if (!thread)
7935 return NULL;
7936
Linus Torvalds1da177e2005-04-16 15:20:36 -07007937 init_waitqueue_head(&thread->wqueue);
7938
Linus Torvalds1da177e2005-04-16 15:20:36 -07007939 thread->run = run;
7940 thread->mddev = mddev;
NeilBrown32a76272005-06-21 17:17:14 -07007941 thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown0da3c612009-09-23 18:09:45 +10007942 thread->tsk = kthread_run(md_thread, thread,
7943 "%s_%s",
7944 mdname(thread->mddev),
NeilBrown02326052012-07-03 15:56:52 +10007945 name);
NeilBrowna6fb0932005-09-09 16:23:56 -07007946 if (IS_ERR(thread->tsk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007947 kfree(thread);
7948 return NULL;
7949 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007950 return thread;
7951}
NeilBrown6c144d32014-09-30 16:15:38 +10007952EXPORT_SYMBOL(md_register_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007953
NeilBrown2b8bf342011-10-11 16:48:23 +11007954void md_unregister_thread(struct md_thread **threadp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007955{
NeilBrown2b8bf342011-10-11 16:48:23 +11007956 struct md_thread *thread = *threadp;
NeilBrowne0cf8f02009-03-31 14:39:39 +11007957 if (!thread)
7958 return;
NeilBrown36a4e1f2011-10-07 14:23:17 +11007959 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
NeilBrown01f96c02011-09-21 15:30:20 +10007960 /* Locking ensures that mddev_unlock does not wake_up a
7961 * non-existent thread
7962 */
7963 spin_lock(&pers_lock);
7964 *threadp = NULL;
7965 spin_unlock(&pers_lock);
NeilBrowna6fb0932005-09-09 16:23:56 -07007966
7967 kthread_stop(thread->tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007968 kfree(thread);
7969}
NeilBrown6c144d32014-09-30 16:15:38 +10007970EXPORT_SYMBOL(md_unregister_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007971
NeilBrownfd01b882011-10-11 16:47:53 +11007972void md_error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007973{
NeilBrownb2d444d2005-11-08 21:39:31 -08007974 if (!rdev || test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007975 return;
Dan Williams6bfe0b42008-04-30 00:52:32 -07007976
NeilBrownde393cd2011-07-28 11:31:48 +10007977 if (!mddev->pers || !mddev->pers->error_handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007978 return;
7979 mddev->pers->error_handler(mddev,rdev);
Neil Brown72a23c22008-06-28 08:31:41 +10007980 if (mddev->degraded)
7981 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown00bcb4a2010-06-01 19:37:23 +10007982 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007983 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7984 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7985 md_wakeup_thread(mddev->thread);
NeilBrown768a4182010-07-26 11:49:55 +10007986 if (mddev->event_work.func)
Tejun Heoe804ac72010-10-15 15:36:08 +02007987 queue_work(md_misc_wq, &mddev->event_work);
Guoqing Jiangbb9ef712015-12-28 10:46:38 +08007988 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007989}
NeilBrown6c144d32014-09-30 16:15:38 +10007990EXPORT_SYMBOL(md_error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007991
7992/* seq_file implementation /proc/mdstat */
7993
7994static void status_unused(struct seq_file *seq)
7995{
7996 int i = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11007997 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007998
7999 seq_printf(seq, "unused devices: ");
8000
Cheng Renquan159ec1f2009-01-09 08:31:08 +11008001 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008002 char b[BDEVNAME_SIZE];
8003 i++;
8004 seq_printf(seq, "%s ",
8005 bdevname(rdev->bdev,b));
8006 }
8007 if (!i)
8008 seq_printf(seq, "<none>");
8009
8010 seq_printf(seq, "\n");
8011}
8012
NeilBrownf7851be2015-07-02 17:12:58 +10008013static int status_resync(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008014{
NeilBrowndd71cf62009-05-07 12:49:35 +10008015 sector_t max_sectors, resync, res;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008016 unsigned long dt, db = 0;
8017 sector_t rt, curr_mark_cnt, resync_mark_cnt;
8018 int scale, recovery_active;
NeilBrown4588b422006-03-27 01:18:04 -08008019 unsigned int per_milli;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008020
NeilBrownc804cde2012-05-21 09:28:33 +10008021 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8022 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
NeilBrowndd71cf62009-05-07 12:49:35 +10008023 max_sectors = mddev->resync_max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008024 else
NeilBrowndd71cf62009-05-07 12:49:35 +10008025 max_sectors = mddev->dev_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008026
NeilBrownf7851be2015-07-02 17:12:58 +10008027 resync = mddev->curr_resync;
8028 if (resync <= 3) {
8029 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8030 /* Still cleaning up */
8031 resync = max_sectors;
Nate Daileyd2e2ec82017-11-30 11:33:30 -05008032 } else if (resync > max_sectors)
8033 resync = max_sectors;
8034 else
NeilBrownf7851be2015-07-02 17:12:58 +10008035 resync -= atomic_read(&mddev->recovery_active);
8036
8037 if (resync == 0) {
Guoqing Jiang0357ba22018-07-02 16:26:25 +08008038 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8039 struct md_rdev *rdev;
8040
8041 rdev_for_each(rdev, mddev)
8042 if (rdev->raid_disk >= 0 &&
8043 !test_bit(Faulty, &rdev->flags) &&
8044 rdev->recovery_offset != MaxSector &&
8045 rdev->recovery_offset) {
8046 seq_printf(seq, "\trecover=REMOTE");
8047 return 1;
8048 }
8049 if (mddev->reshape_position != MaxSector)
8050 seq_printf(seq, "\treshape=REMOTE");
8051 else
8052 seq_printf(seq, "\tresync=REMOTE");
8053 return 1;
8054 }
NeilBrownf7851be2015-07-02 17:12:58 +10008055 if (mddev->recovery_cp < MaxSector) {
8056 seq_printf(seq, "\tresync=PENDING");
8057 return 1;
8058 }
8059 return 0;
8060 }
8061 if (resync < 3) {
8062 seq_printf(seq, "\tresync=DELAYED");
8063 return 1;
8064 }
8065
NeilBrown403df472014-09-30 15:52:29 +10008066 WARN_ON(max_sectors == 0);
NeilBrown4588b422006-03-27 01:18:04 -08008067 /* Pick 'scale' such that (resync>>scale)*1000 will fit
NeilBrowndd71cf62009-05-07 12:49:35 +10008068 * in a sector_t, and (max_sectors>>scale) will fit in a
NeilBrown4588b422006-03-27 01:18:04 -08008069 * u32, as those are the requirements for sector_div.
8070 * Thus 'scale' must be at least 10
8071 */
8072 scale = 10;
8073 if (sizeof(sector_t) > sizeof(unsigned long)) {
NeilBrowndd71cf62009-05-07 12:49:35 +10008074 while ( max_sectors/2 > (1ULL<<(scale+32)))
NeilBrown4588b422006-03-27 01:18:04 -08008075 scale++;
8076 }
8077 res = (resync>>scale)*1000;
NeilBrowndd71cf62009-05-07 12:49:35 +10008078 sector_div(res, (u32)((max_sectors>>scale)+1));
NeilBrown4588b422006-03-27 01:18:04 -08008079
8080 per_milli = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008081 {
NeilBrown4588b422006-03-27 01:18:04 -08008082 int i, x = per_milli/50, y = 20-x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008083 seq_printf(seq, "[");
8084 for (i = 0; i < x; i++)
8085 seq_printf(seq, "=");
8086 seq_printf(seq, ">");
8087 for (i = 0; i < y; i++)
8088 seq_printf(seq, ".");
8089 seq_printf(seq, "] ");
8090 }
NeilBrown4588b422006-03-27 01:18:04 -08008091 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
NeilBrownccfcc3c2006-03-27 01:18:09 -08008092 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8093 "reshape" :
NeilBrown61df9d92006-10-03 01:15:57 -07008094 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8095 "check" :
8096 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8097 "resync" : "recovery"))),
8098 per_milli/10, per_milli % 10,
NeilBrowndd71cf62009-05-07 12:49:35 +10008099 (unsigned long long) resync/2,
8100 (unsigned long long) max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008101
8102 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07008103 * dt: time from mark until now
8104 * db: blocks written from mark until now
8105 * rt: remaining time
NeilBrowndd71cf62009-05-07 12:49:35 +10008106 *
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008107 * rt is a sector_t, which is always 64bit now. We are keeping
8108 * the original algorithm, but it is not really necessary.
8109 *
8110 * Original algorithm:
8111 * So we divide before multiply in case it is 32bit and close
8112 * to the limit.
8113 * We scale the divisor (db) by 32 to avoid losing precision
8114 * near the end of resync when the number of remaining sectors
8115 * is close to 'db'.
8116 * We then divide rt by 32 after multiplying by db to compensate.
8117 * The '+1' avoids division by zero if db is very small.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008118 */
8119 dt = ((jiffies - mddev->resync_mark) / HZ);
8120 if (!dt) dt++;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008121
8122 curr_mark_cnt = mddev->curr_mark_cnt;
8123 recovery_active = atomic_read(&mddev->recovery_active);
8124 resync_mark_cnt = mddev->resync_mark_cnt;
8125
8126 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8127 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008128
NeilBrowndd71cf62009-05-07 12:49:35 +10008129 rt = max_sectors - resync; /* number of remaining sectors */
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008130 rt = div64_u64(rt, db/32+1);
NeilBrowndd71cf62009-05-07 12:49:35 +10008131 rt *= dt;
8132 rt >>= 5;
8133
8134 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8135 ((unsigned long)rt % 60)/6);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008136
NeilBrownff4e8d92006-07-10 04:44:16 -07008137 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
NeilBrownf7851be2015-07-02 17:12:58 +10008138 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008139}
8140
8141static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8142{
8143 struct list_head *tmp;
8144 loff_t l = *pos;
NeilBrownfd01b882011-10-11 16:47:53 +11008145 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008146
Jan Glauber7abfaba2021-03-17 15:04:39 +01008147 if (l == 0x10000) {
8148 ++*pos;
8149 return (void *)2;
8150 }
8151 if (l > 0x10000)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008152 return NULL;
8153 if (!l--)
8154 /* header */
8155 return (void*)1;
8156
8157 spin_lock(&all_mddevs_lock);
8158 list_for_each(tmp,&all_mddevs)
8159 if (!l--) {
NeilBrownfd01b882011-10-11 16:47:53 +11008160 mddev = list_entry(tmp, struct mddev, all_mddevs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008161 mddev_get(mddev);
8162 spin_unlock(&all_mddevs_lock);
8163 return mddev;
8164 }
8165 spin_unlock(&all_mddevs_lock);
8166 if (!l--)
8167 return (void*)2;/* tail */
8168 return NULL;
8169}
8170
8171static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8172{
8173 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11008174 struct mddev *next_mddev, *mddev = v;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008175
Linus Torvalds1da177e2005-04-16 15:20:36 -07008176 ++*pos;
8177 if (v == (void*)2)
8178 return NULL;
8179
8180 spin_lock(&all_mddevs_lock);
8181 if (v == (void*)1)
8182 tmp = all_mddevs.next;
8183 else
8184 tmp = mddev->all_mddevs.next;
8185 if (tmp != &all_mddevs)
NeilBrownfd01b882011-10-11 16:47:53 +11008186 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008187 else {
8188 next_mddev = (void*)2;
8189 *pos = 0x10000;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008191 spin_unlock(&all_mddevs_lock);
8192
8193 if (v != (void*)1)
8194 mddev_put(mddev);
8195 return next_mddev;
8196
8197}
8198
8199static void md_seq_stop(struct seq_file *seq, void *v)
8200{
NeilBrownfd01b882011-10-11 16:47:53 +11008201 struct mddev *mddev = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008202
8203 if (mddev && v != (void*)1 && v != (void*)2)
8204 mddev_put(mddev);
8205}
8206
8207static int md_seq_show(struct seq_file *seq, void *v)
8208{
NeilBrownfd01b882011-10-11 16:47:53 +11008209 struct mddev *mddev = v;
Andre Nolldd8ac332009-03-31 14:33:13 +11008210 sector_t sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11008211 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008212
8213 if (v == (void*)1) {
NeilBrown84fc4b52011-10-11 16:49:58 +11008214 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008215 seq_printf(seq, "Personalities : ");
8216 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008217 list_for_each_entry(pers, &pers_list, list)
8218 seq_printf(seq, "[%s] ", pers->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008219
8220 spin_unlock(&pers_lock);
8221 seq_printf(seq, "\n");
Kay Sieversf1514632011-07-12 20:48:39 +02008222 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008223 return 0;
8224 }
8225 if (v == (void*)2) {
8226 status_unused(seq);
8227 return 0;
8228 }
8229
NeilBrown36d091f2014-12-15 12:56:58 +11008230 spin_lock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008231 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8232 seq_printf(seq, "%s : %sactive", mdname(mddev),
8233 mddev->pers ? "" : "in");
8234 if (mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08008235 if (mddev->ro==1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008236 seq_printf(seq, " (read-only)");
NeilBrownf91de922005-11-08 21:39:36 -08008237 if (mddev->ro==2)
NeilBrown52720ae2008-03-10 11:43:47 -07008238 seq_printf(seq, " (auto-read-only)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008239 seq_printf(seq, " %s", mddev->pers->name);
8240 }
8241
Andre Nolldd8ac332009-03-31 14:33:13 +11008242 sectors = 0;
NeilBrownf97fcad2014-12-15 12:56:59 +11008243 rcu_read_lock();
8244 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008245 char b[BDEVNAME_SIZE];
8246 seq_printf(seq, " %s[%d]",
8247 bdevname(rdev->bdev,b), rdev->desc_nr);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07008248 if (test_bit(WriteMostly, &rdev->flags))
8249 seq_printf(seq, "(W)");
Shaohua Li9efdca12015-10-12 16:59:50 -07008250 if (test_bit(Journal, &rdev->flags))
8251 seq_printf(seq, "(J)");
NeilBrownb2d444d2005-11-08 21:39:31 -08008252 if (test_bit(Faulty, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008253 seq_printf(seq, "(F)");
8254 continue;
NeilBrown2d78f8c2011-12-23 10:17:51 +11008255 }
8256 if (rdev->raid_disk < 0)
NeilBrownb325a322005-09-09 16:24:00 -07008257 seq_printf(seq, "(S)"); /* spare */
NeilBrown2d78f8c2011-12-23 10:17:51 +11008258 if (test_bit(Replacement, &rdev->flags))
8259 seq_printf(seq, "(R)");
Andre Nolldd8ac332009-03-31 14:33:13 +11008260 sectors += rdev->sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008261 }
NeilBrownf97fcad2014-12-15 12:56:59 +11008262 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008263
8264 if (!list_empty(&mddev->disks)) {
8265 if (mddev->pers)
8266 seq_printf(seq, "\n %llu blocks",
Andre Nollf233ea52008-07-21 17:05:22 +10008267 (unsigned long long)
8268 mddev->array_sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008269 else
8270 seq_printf(seq, "\n %llu blocks",
Andre Nolldd8ac332009-03-31 14:33:13 +11008271 (unsigned long long)sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008272 }
NeilBrown1cd6bf12005-09-09 16:24:00 -07008273 if (mddev->persistent) {
8274 if (mddev->major_version != 0 ||
8275 mddev->minor_version != 90) {
8276 seq_printf(seq," super %d.%d",
8277 mddev->major_version,
8278 mddev->minor_version);
8279 }
NeilBrowne6910632008-02-06 01:39:51 -08008280 } else if (mddev->external)
8281 seq_printf(seq, " super external:%s",
8282 mddev->metadata_type);
8283 else
NeilBrown1cd6bf12005-09-09 16:24:00 -07008284 seq_printf(seq, " super non-persistent");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008285
8286 if (mddev->pers) {
NeilBrownd710e132008-10-13 11:55:12 +11008287 mddev->pers->status(seq, mddev);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008288 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008289 if (mddev->pers->sync_request) {
NeilBrownf7851be2015-07-02 17:12:58 +10008290 if (status_resync(seq, mddev))
NeilBrown8e1b39d2005-11-08 21:39:41 -08008291 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008292 }
NeilBrown32a76272005-06-21 17:17:14 -07008293 } else
8294 seq_printf(seq, "\n ");
8295
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07008296 md_bitmap_status(seq, mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008297
8298 seq_printf(seq, "\n");
8299 }
NeilBrown36d091f2014-12-15 12:56:58 +11008300 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008301
Linus Torvalds1da177e2005-04-16 15:20:36 -07008302 return 0;
8303}
8304
Jan Engelhardt110518b2009-05-07 12:49:37 +10008305static const struct seq_operations md_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008306 .start = md_seq_start,
8307 .next = md_seq_next,
8308 .stop = md_seq_stop,
8309 .show = md_seq_show,
8310};
8311
8312static int md_seq_open(struct inode *inode, struct file *file)
8313{
Kay Sieversf1514632011-07-12 20:48:39 +02008314 struct seq_file *seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008315 int error;
8316
8317 error = seq_open(file, &md_seq_ops);
NeilBrownd7603b72006-01-06 00:20:30 -08008318 if (error)
Kay Sieversf1514632011-07-12 20:48:39 +02008319 return error;
8320
8321 seq = file->private_data;
8322 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008323 return error;
8324}
8325
NeilBrowne2f23b62014-04-09 14:33:51 +10008326static int md_unloading;
Al Viroafc9a422017-07-03 06:39:46 -04008327static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
NeilBrownd7603b72006-01-06 00:20:30 -08008328{
Kay Sieversf1514632011-07-12 20:48:39 +02008329 struct seq_file *seq = filp->private_data;
Al Viroafc9a422017-07-03 06:39:46 -04008330 __poll_t mask;
NeilBrownd7603b72006-01-06 00:20:30 -08008331
NeilBrowne2f23b62014-04-09 14:33:51 +10008332 if (md_unloading)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008333 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008334 poll_wait(filp, &md_event_waiters, wait);
8335
8336 /* always allow read */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008337 mask = EPOLLIN | EPOLLRDNORM;
NeilBrownd7603b72006-01-06 00:20:30 -08008338
Kay Sieversf1514632011-07-12 20:48:39 +02008339 if (seq->poll_event != atomic_read(&md_event_count))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008340 mask |= EPOLLERR | EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008341 return mask;
8342}
8343
Alexey Dobriyan97a32532020-02-03 17:37:17 -08008344static const struct proc_ops mdstat_proc_ops = {
8345 .proc_open = md_seq_open,
8346 .proc_read = seq_read,
8347 .proc_lseek = seq_lseek,
8348 .proc_release = seq_release,
8349 .proc_poll = mdstat_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008350};
8351
NeilBrown84fc4b52011-10-11 16:49:58 +11008352int register_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008353{
NeilBrown9d487392016-11-02 14:16:49 +11008354 pr_debug("md: %s personality registered for level %d\n",
8355 p->name, p->level);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008356 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008357 list_add_tail(&p->list, &pers_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008358 spin_unlock(&pers_lock);
8359 return 0;
8360}
NeilBrown6c144d32014-09-30 16:15:38 +10008361EXPORT_SYMBOL(register_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008362
NeilBrown84fc4b52011-10-11 16:49:58 +11008363int unregister_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008364{
NeilBrown9d487392016-11-02 14:16:49 +11008365 pr_debug("md: %s personality unregistered\n", p->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008366 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008367 list_del_init(&p->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008368 spin_unlock(&pers_lock);
8369 return 0;
8370}
NeilBrown6c144d32014-09-30 16:15:38 +10008371EXPORT_SYMBOL(unregister_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008372
NeilBrown6022e752015-08-13 12:32:55 +10008373int register_md_cluster_operations(struct md_cluster_operations *ops,
8374 struct module *module)
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008375{
NeilBrown6022e752015-08-13 12:32:55 +10008376 int ret = 0;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008377 spin_lock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008378 if (md_cluster_ops != NULL)
8379 ret = -EALREADY;
8380 else {
8381 md_cluster_ops = ops;
8382 md_cluster_mod = module;
8383 }
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008384 spin_unlock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008385 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008386}
8387EXPORT_SYMBOL(register_md_cluster_operations);
8388
8389int unregister_md_cluster_operations(void)
8390{
8391 spin_lock(&pers_lock);
8392 md_cluster_ops = NULL;
8393 spin_unlock(&pers_lock);
8394 return 0;
8395}
8396EXPORT_SYMBOL(unregister_md_cluster_operations);
8397
8398int md_setup_cluster(struct mddev *mddev, int nodes)
8399{
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008400 int ret;
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008401 if (!md_cluster_ops)
8402 request_module("md-cluster");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008403 spin_lock(&pers_lock);
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008404 /* ensure module won't be unloaded */
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008405 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
NeilBrown9d487392016-11-02 14:16:49 +11008406 pr_warn("can't find md-cluster module or get it's reference.\n");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008407 spin_unlock(&pers_lock);
8408 return -ENOENT;
8409 }
8410 spin_unlock(&pers_lock);
8411
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008412 ret = md_cluster_ops->join(mddev, nodes);
8413 if (!ret)
8414 mddev->safemode_delay = 0;
8415 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008416}
8417
8418void md_cluster_stop(struct mddev *mddev)
8419{
Goldwyn Rodriguesc4ce8672014-03-29 10:20:02 -05008420 if (!md_cluster_ops)
8421 return;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008422 md_cluster_ops->leave(mddev);
8423 module_put(md_cluster_mod);
8424}
8425
NeilBrownfd01b882011-10-11 16:47:53 +11008426static int is_mddev_idle(struct mddev *mddev, int init)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008427{
NeilBrownf72ffdd2014-09-30 14:23:59 +10008428 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008429 int idle;
NeilBrowneea1bf32009-03-31 14:27:02 +11008430 int curr_events;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008431
8432 idle = 1;
NeilBrown4b809912008-07-21 17:05:25 +10008433 rcu_read_lock();
8434 rdev_for_each_rcu(rdev, mddev) {
Christoph Hellwig4245e522020-09-03 07:40:59 +02008435 struct gendisk *disk = rdev->bdev->bd_disk;
Christoph Hellwig8446fe92020-11-24 09:36:54 +01008436 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
NeilBrowneea1bf32009-03-31 14:27:02 +11008437 atomic_read(&disk->sync_io);
NeilBrown713f6ab2007-07-17 04:06:12 -07008438 /* sync IO will cause sync_io to increase before the disk_stats
8439 * as sync_io is counted when a request starts, and
8440 * disk_stats is counted when it completes.
8441 * So resync activity will cause curr_events to be smaller than
8442 * when there was no such activity.
8443 * non-sync IO will cause disk_stat to increase without
8444 * increasing sync_io so curr_events will (eventually)
8445 * be larger than it was before. Once it becomes
8446 * substantially larger, the test below will cause
8447 * the array to appear non-idle, and resync will slow
8448 * down.
8449 * If there is a lot of outstanding resync activity when
8450 * we set last_event to curr_events, then all that activity
8451 * completing might cause the array to appear non-idle
8452 * and resync will be slowed down even though there might
8453 * not have been non-resync activity. This will only
8454 * happen once though. 'last_events' will soon reflect
8455 * the state where there is little or no outstanding
8456 * resync requests, and further resync activity will
8457 * always make curr_events less than last_events.
NeilBrownc0e48522005-11-18 01:11:01 -08008458 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008459 */
NeilBrowneea1bf32009-03-31 14:27:02 +11008460 if (init || curr_events - rdev->last_events > 64) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008461 rdev->last_events = curr_events;
8462 idle = 0;
8463 }
8464 }
NeilBrown4b809912008-07-21 17:05:25 +10008465 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008466 return idle;
8467}
8468
NeilBrownfd01b882011-10-11 16:47:53 +11008469void md_done_sync(struct mddev *mddev, int blocks, int ok)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008470{
8471 /* another "blocks" (512byte) blocks have been synced */
8472 atomic_sub(blocks, &mddev->recovery_active);
8473 wake_up(&mddev->recovery_wait);
8474 if (!ok) {
NeilBrowndfc70642008-05-23 13:04:39 -07008475 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
majianpeng0a19caa2012-11-19 19:57:34 +08008476 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008477 md_wakeup_thread(mddev->thread);
8478 // stop recovery, signal do_sync ....
8479 }
8480}
NeilBrown6c144d32014-09-30 16:15:38 +10008481EXPORT_SYMBOL(md_done_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008482
NeilBrown06d91a52005-06-21 17:17:12 -07008483/* md_write_start(mddev, bi)
8484 * If we need to update some array metadata (e.g. 'active' flag
NeilBrown3d310eb2005-06-21 17:17:26 -07008485 * in superblock) before writing, schedule a superblock update
8486 * and wait for it to complete.
NeilBrowncc27b0c2017-06-05 16:49:39 +10008487 * A return value of 'false' means that the write wasn't recorded
8488 * and cannot proceed as the array is being suspend.
NeilBrown06d91a52005-06-21 17:17:12 -07008489 */
NeilBrowncc27b0c2017-06-05 16:49:39 +10008490bool md_write_start(struct mddev *mddev, struct bio *bi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008491{
Neil Brown0fd62b82008-06-28 08:31:36 +10008492 int did_change = 0;
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008493
NeilBrown06d91a52005-06-21 17:17:12 -07008494 if (bio_data_dir(bi) != WRITE)
NeilBrowncc27b0c2017-06-05 16:49:39 +10008495 return true;
NeilBrown06d91a52005-06-21 17:17:12 -07008496
NeilBrownf91de922005-11-08 21:39:36 -08008497 BUG_ON(mddev->ro == 1);
8498 if (mddev->ro == 2) {
8499 /* need to switch to read/write */
8500 mddev->ro = 0;
8501 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8502 md_wakeup_thread(mddev->thread);
NeilBrown25156192008-03-04 14:29:32 -08008503 md_wakeup_thread(mddev->sync_thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008504 did_change = 1;
NeilBrownf91de922005-11-08 21:39:36 -08008505 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008506 rcu_read_lock();
8507 percpu_ref_get(&mddev->writes_pending);
NeilBrown55cc39f2017-03-15 14:05:14 +11008508 smp_mb(); /* Match smp_mb in set_in_sync() */
NeilBrown31a59e32008-04-30 00:52:30 -07008509 if (mddev->safemode == 1)
8510 mddev->safemode = 0;
NeilBrown4ad23a972017-03-15 14:05:14 +11008511 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
NeilBrown81fe48e2017-08-08 16:56:36 +10008512 if (mddev->in_sync || mddev->sync_checkers) {
NeilBrown85572d72014-12-15 12:56:56 +11008513 spin_lock(&mddev->lock);
NeilBrown3d310eb2005-06-21 17:17:26 -07008514 if (mddev->in_sync) {
8515 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008516 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8517 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown3d310eb2005-06-21 17:17:26 -07008518 md_wakeup_thread(mddev->thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008519 did_change = 1;
NeilBrown3d310eb2005-06-21 17:17:26 -07008520 }
NeilBrown85572d72014-12-15 12:56:56 +11008521 spin_unlock(&mddev->lock);
NeilBrown06d91a52005-06-21 17:17:12 -07008522 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008523 rcu_read_unlock();
Neil Brown0fd62b82008-06-28 08:31:36 +10008524 if (did_change)
NeilBrown00bcb4a2010-06-01 19:37:23 +10008525 sysfs_notify_dirent_safe(mddev->sysfs_state);
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008526 if (!mddev->has_superblocks)
8527 return true;
NeilBrown09a44cc2008-05-23 13:04:36 -07008528 wait_event(mddev->sb_wait,
NeilBrownd47c8ad2017-10-05 16:23:16 +11008529 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8530 mddev->suspended);
NeilBrowncc27b0c2017-06-05 16:49:39 +10008531 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8532 percpu_ref_put(&mddev->writes_pending);
8533 return false;
8534 }
8535 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008536}
NeilBrown6c144d32014-09-30 16:15:38 +10008537EXPORT_SYMBOL(md_write_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008538
NeilBrown49728052017-03-15 14:05:12 +11008539/* md_write_inc can only be called when md_write_start() has
8540 * already been called at least once of the current request.
8541 * It increments the counter and is useful when a single request
8542 * is split into several parts. Each part causes an increment and
8543 * so needs a matching md_write_end().
8544 * Unlike md_write_start(), it is safe to call md_write_inc() inside
8545 * a spinlocked region.
8546 */
8547void md_write_inc(struct mddev *mddev, struct bio *bi)
8548{
8549 if (bio_data_dir(bi) != WRITE)
8550 return;
8551 WARN_ON_ONCE(mddev->in_sync || mddev->ro);
NeilBrown4ad23a972017-03-15 14:05:14 +11008552 percpu_ref_get(&mddev->writes_pending);
NeilBrown49728052017-03-15 14:05:12 +11008553}
8554EXPORT_SYMBOL(md_write_inc);
8555
NeilBrownfd01b882011-10-11 16:47:53 +11008556void md_write_end(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008557{
NeilBrown4ad23a972017-03-15 14:05:14 +11008558 percpu_ref_put(&mddev->writes_pending);
8559
8560 if (mddev->safemode == 2)
8561 md_wakeup_thread(mddev->thread);
8562 else if (mddev->safemode_delay)
8563 /* The roundup() ensures this only performs locking once
8564 * every ->safemode_delay jiffies
8565 */
8566 mod_timer(&mddev->safemode_timer,
8567 roundup(jiffies, mddev->safemode_delay) +
8568 mddev->safemode_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008569}
NeilBrown4ad23a972017-03-15 14:05:14 +11008570
NeilBrown6c144d32014-09-30 16:15:38 +10008571EXPORT_SYMBOL(md_write_end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008572
Xiao Nicf784082021-02-04 15:50:43 +08008573/* This is used by raid0 and raid10 */
8574void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
8575 struct bio *bio, sector_t start, sector_t size)
8576{
8577 struct bio *discard_bio = NULL;
8578
8579 if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0,
8580 &discard_bio) || !discard_bio)
8581 return;
8582
8583 bio_chain(discard_bio, bio);
8584 bio_clone_blkg_association(discard_bio, bio);
8585 if (mddev->gendisk)
8586 trace_block_bio_remap(discard_bio,
8587 disk_devt(mddev->gendisk),
8588 bio->bi_iter.bi_sector);
8589 submit_bio_noacct(discard_bio);
8590}
8591EXPORT_SYMBOL_GPL(md_submit_discard_bio);
8592
Guoqing Jiang10764812021-05-25 17:46:17 +08008593static void md_end_io_acct(struct bio *bio)
8594{
8595 struct md_io_acct *md_io_acct = bio->bi_private;
8596 struct bio *orig_bio = md_io_acct->orig_bio;
8597
8598 orig_bio->bi_status = bio->bi_status;
8599
8600 bio_end_io_acct(orig_bio, md_io_acct->start_time);
8601 bio_put(bio);
8602 bio_endio(orig_bio);
8603}
8604
Guoqing Jiangdaee2022021-06-03 17:21:06 +08008605/*
8606 * Used by personalities that don't already clone the bio and thus can't
8607 * easily add the timestamp to their extended bio structure.
8608 */
Guoqing Jiang10764812021-05-25 17:46:17 +08008609void md_account_bio(struct mddev *mddev, struct bio **bio)
8610{
8611 struct md_io_acct *md_io_acct;
8612 struct bio *clone;
8613
8614 if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
8615 return;
8616
8617 clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
8618 md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
8619 md_io_acct->orig_bio = *bio;
8620 md_io_acct->start_time = bio_start_io_acct(*bio);
8621
8622 clone->bi_end_io = md_end_io_acct;
8623 clone->bi_private = md_io_acct;
8624 *bio = clone;
8625}
8626EXPORT_SYMBOL_GPL(md_account_bio);
8627
NeilBrown2a2275d2007-01-26 00:57:11 -08008628/* md_allow_write(mddev)
8629 * Calling this ensures that the array is marked 'active' so that writes
8630 * may proceed without blocking. It is important to call this before
8631 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8632 * Must be called with mddev_lock held.
8633 */
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008634void md_allow_write(struct mddev *mddev)
NeilBrown2a2275d2007-01-26 00:57:11 -08008635{
8636 if (!mddev->pers)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008637 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008638 if (mddev->ro)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008639 return;
Neil Brown1a0fd492008-06-28 08:31:27 +10008640 if (!mddev->pers->sync_request)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008641 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008642
NeilBrown85572d72014-12-15 12:56:56 +11008643 spin_lock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008644 if (mddev->in_sync) {
8645 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008646 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8647 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown2a2275d2007-01-26 00:57:11 -08008648 if (mddev->safemode_delay &&
8649 mddev->safemode == 0)
8650 mddev->safemode = 1;
NeilBrown85572d72014-12-15 12:56:56 +11008651 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008652 md_update_sb(mddev, 0);
NeilBrown00bcb4a2010-06-01 19:37:23 +10008653 sysfs_notify_dirent_safe(mddev->sysfs_state);
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008654 /* wait for the dirty state to be recorded in the metadata */
8655 wait_event(mddev->sb_wait,
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008656 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown2a2275d2007-01-26 00:57:11 -08008657 } else
NeilBrown85572d72014-12-15 12:56:56 +11008658 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008659}
8660EXPORT_SYMBOL_GPL(md_allow_write);
8661
Linus Torvalds1da177e2005-04-16 15:20:36 -07008662#define SYNC_MARKS 10
8663#define SYNC_MARK_STEP (3*HZ)
majianpeng54f89342012-10-31 11:59:10 +11008664#define UPDATE_FREQUENCY (5*60*HZ)
Shaohua Li4ed87312012-10-11 13:34:00 +11008665void md_do_sync(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008666{
Shaohua Li4ed87312012-10-11 13:34:00 +11008667 struct mddev *mddev = thread->mddev;
NeilBrownfd01b882011-10-11 16:47:53 +11008668 struct mddev *mddev2;
Yufen Yue5b521e2019-06-14 15:41:07 -07008669 unsigned int currspeed = 0, window;
Xiao Niac7e50a2014-08-07 09:37:41 -04008670 sector_t max_sectors,j, io_sectors, recovery_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008671 unsigned long mark[SYNC_MARKS];
majianpeng54f89342012-10-31 11:59:10 +11008672 unsigned long update_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008673 sector_t mark_cnt[SYNC_MARKS];
8674 int last_mark,m;
8675 struct list_head *tmp;
8676 sector_t last_check;
NeilBrown57afd892005-06-21 17:17:13 -07008677 int skipped = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008678 struct md_rdev *rdev;
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008679 char *desc, *action = NULL;
majianpeng7c2c57c2012-07-03 12:12:26 +10008680 struct blk_plug plug;
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008681 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008682
8683 /* just incase thread restarts... */
Song Liud5d885f2017-11-19 22:17:01 -08008684 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8685 test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008686 return;
NeilBrown3991b312014-05-28 13:39:23 +10008687 if (mddev->ro) {/* never try to sync a read-only array */
8688 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008689 return;
NeilBrown3991b312014-05-28 13:39:23 +10008690 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008691
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008692 if (mddev_is_clustered(mddev)) {
8693 ret = md_cluster_ops->resync_start(mddev);
8694 if (ret)
8695 goto skip;
8696
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04008697 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008698 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8699 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8700 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8701 && ((unsigned long long)mddev->curr_resync_completed
8702 < (unsigned long long)mddev->resync_max_sectors))
8703 goto skip;
8704 }
8705
NeilBrown61df9d92006-10-03 01:15:57 -07008706 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008707 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008708 desc = "data-check";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008709 action = "check";
8710 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008711 desc = "requested-resync";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008712 action = "repair";
8713 } else
NeilBrown61df9d92006-10-03 01:15:57 -07008714 desc = "resync";
8715 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8716 desc = "reshape";
8717 else
8718 desc = "recovery";
8719
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008720 mddev->last_sync_action = action ?: desc;
8721
Linus Torvalds1da177e2005-04-16 15:20:36 -07008722 /* we overload curr_resync somewhat here.
8723 * 0 == not engaged in resync at all
8724 * 2 == checking that there is no conflict with another sync
8725 * 1 == like 2, but have yielded to allow conflicting resync to
Yufen Yue5b521e2019-06-14 15:41:07 -07008726 * commence
Linus Torvalds1da177e2005-04-16 15:20:36 -07008727 * other == active in resync - this many blocks
8728 *
8729 * Before starting a resync we must have set curr_resync to
8730 * 2, and then checked that every "conflicting" array has curr_resync
8731 * less than ours. When we find one that is the same or higher
8732 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
8733 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
8734 * This will mean we have to start checking from the beginning again.
8735 *
8736 */
8737
8738 do {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008739 int mddev2_minor = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008740 mddev->curr_resync = 2;
8741
8742 try_again:
NeilBrown404e4b42009-12-30 15:25:23 +11008743 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008744 goto skip;
NeilBrown29ac4aa2008-02-06 01:39:58 -08008745 for_each_mddev(mddev2, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008746 if (mddev2 == mddev)
8747 continue;
Bernd Schubert90b08712008-05-23 13:04:38 -07008748 if (!mddev->parallel_resync
8749 && mddev2->curr_resync
8750 && match_mddev_units(mddev, mddev2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008751 DEFINE_WAIT(wq);
8752 if (mddev < mddev2 && mddev->curr_resync == 2) {
8753 /* arbitrarily yield */
8754 mddev->curr_resync = 1;
8755 wake_up(&resync_wait);
8756 }
8757 if (mddev > mddev2 && mddev->curr_resync == 1)
8758 /* no need to wait here, we can wait the next
8759 * time 'round when curr_resync == 2
8760 */
8761 continue;
NeilBrown97441972008-09-19 11:49:54 +10008762 /* We need to wait 'interruptible' so as not to
8763 * contribute to the load average, and not to
8764 * be caught by 'softlockup'
8765 */
8766 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
NeilBrownc91abf52013-11-19 12:02:01 +11008767 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown8712e552005-10-26 01:58:58 -07008768 mddev2->curr_resync >= mddev->curr_resync) {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008769 if (mddev2_minor != mddev2->md_minor) {
8770 mddev2_minor = mddev2->md_minor;
NeilBrown9d487392016-11-02 14:16:49 +11008771 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8772 desc, mdname(mddev),
8773 mdname(mddev2));
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008774 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008775 mddev_put(mddev2);
NeilBrown97441972008-09-19 11:49:54 +10008776 if (signal_pending(current))
8777 flush_signals(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008778 schedule();
8779 finish_wait(&resync_wait, &wq);
8780 goto try_again;
8781 }
8782 finish_wait(&resync_wait, &wq);
8783 }
8784 }
8785 } while (mddev->curr_resync < 2);
8786
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008787 j = 0;
NeilBrown9d888832005-11-08 21:39:26 -08008788 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008789 /* resync follows the size requested by the personality,
NeilBrown57afd892005-06-21 17:17:13 -07008790 * which defaults to physical size, but can be virtual size
Linus Torvalds1da177e2005-04-16 15:20:36 -07008791 */
8792 max_sectors = mddev->resync_max_sectors;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11008793 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008794 /* we don't use the checkpoint if there's a bitmap */
Neil Brown5e96ee62008-06-28 08:31:24 +10008795 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8796 j = mddev->resync_min;
8797 else if (!mddev->bitmap)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008798 j = mddev->recovery_cp;
Neil Brown5e96ee62008-06-28 08:31:24 +10008799
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008800 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
NeilBrownc804cde2012-05-21 09:28:33 +10008801 max_sectors = mddev->resync_max_sectors;
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008802 /*
8803 * If the original node aborts reshaping then we continue the
8804 * reshaping, so set j again to avoid restart reshape from the
8805 * first beginning
8806 */
8807 if (mddev_is_clustered(mddev) &&
8808 mddev->reshape_position != MaxSector)
8809 j = mddev->reshape_position;
8810 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008811 /* recovery follows the physical size of devices */
Andre Noll58c0fed2009-03-31 14:33:13 +11008812 max_sectors = mddev->dev_sectors;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008813 j = MaxSector;
Dan Williams4e59ca72009-12-12 21:17:06 -07008814 rcu_read_lock();
NeilBrowndafb20f2012-03-19 12:46:39 +11008815 rdev_for_each_rcu(rdev, mddev)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008816 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07008817 !test_bit(Journal, &rdev->flags) &&
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008818 !test_bit(Faulty, &rdev->flags) &&
8819 !test_bit(In_sync, &rdev->flags) &&
8820 rdev->recovery_offset < j)
8821 j = rdev->recovery_offset;
Dan Williams4e59ca72009-12-12 21:17:06 -07008822 rcu_read_unlock();
NeilBrown133d4522014-07-02 12:04:14 +10008823
8824 /* If there is a bitmap, we need to make sure all
8825 * writes that started before we added a spare
8826 * complete before we start doing a recovery.
8827 * Otherwise the write might complete and (via
8828 * bitmap_endwrite) set a bit in the bitmap after the
8829 * recovery has checked that bit and skipped that
8830 * region.
8831 */
8832 if (mddev->bitmap) {
8833 mddev->pers->quiesce(mddev, 1);
8834 mddev->pers->quiesce(mddev, 0);
8835 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008836 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008837
NeilBrown9d487392016-11-02 14:16:49 +11008838 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8839 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
8840 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8841 speed_max(mddev), desc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008842
NeilBrowneea1bf32009-03-31 14:27:02 +11008843 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008844
NeilBrown57afd892005-06-21 17:17:13 -07008845 io_sectors = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008846 for (m = 0; m < SYNC_MARKS; m++) {
8847 mark[m] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008848 mark_cnt[m] = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008849 }
8850 last_mark = 0;
8851 mddev->resync_mark = mark[last_mark];
8852 mddev->resync_mark_cnt = mark_cnt[last_mark];
8853
8854 /*
8855 * Tune reconstruction:
8856 */
Yufen Yue5b521e2019-06-14 15:41:07 -07008857 window = 32 * (PAGE_SIZE / 512);
NeilBrown9d487392016-11-02 14:16:49 +11008858 pr_debug("md: using %dk window, over a total of %lluk.\n",
8859 window/2, (unsigned long long)max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008860
8861 atomic_set(&mddev->recovery_active, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008862 last_check = 0;
8863
8864 if (j>2) {
NeilBrown9d487392016-11-02 14:16:49 +11008865 pr_debug("md: resuming %s of %s from checkpoint.\n",
8866 desc, mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008867 mddev->curr_resync = j;
NeilBrown72f36d52012-10-11 14:25:57 +11008868 } else
8869 mddev->curr_resync = 3; /* no longer delayed */
NeilBrown75d3da42011-01-14 09:14:34 +11008870 mddev->curr_resync_completed = j;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008871 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown72f36d52012-10-11 14:25:57 +11008872 md_new_event(mddev);
majianpeng54f89342012-10-31 11:59:10 +11008873 update_time = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008874
majianpeng7c2c57c2012-07-03 12:12:26 +10008875 blk_start_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008876 while (j < max_sectors) {
NeilBrown57afd892005-06-21 17:17:13 -07008877 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008878
NeilBrown57afd892005-06-21 17:17:13 -07008879 skipped = 0;
NeilBrown97e4f422009-03-31 14:33:13 +11008880
NeilBrown7a91ee12009-05-26 12:57:21 +10008881 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8882 ((mddev->curr_resync > mddev->curr_resync_completed &&
8883 (mddev->curr_resync - mddev->curr_resync_completed)
8884 > (max_sectors >> 4)) ||
majianpeng54f89342012-10-31 11:59:10 +11008885 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
NeilBrown7a91ee12009-05-26 12:57:21 +10008886 (j - mddev->curr_resync_completed)*2
NeilBrownc5e19d92015-07-17 12:06:02 +10008887 >= mddev->resync_max - mddev->curr_resync_completed ||
8888 mddev->curr_resync_completed > mddev->resync_max
NeilBrown7a91ee12009-05-26 12:57:21 +10008889 )) {
NeilBrown97e4f422009-03-31 14:33:13 +11008890 /* time to update curr_resync_completed */
NeilBrown97e4f422009-03-31 14:33:13 +11008891 wait_event(mddev->recovery_wait,
8892 atomic_read(&mddev->recovery_active) == 0);
NeilBrown75d3da42011-01-14 09:14:34 +11008893 mddev->curr_resync_completed = j;
kernelmail35d78c62012-10-31 11:59:10 +11008894 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8895 j > mddev->recovery_cp)
8896 mddev->recovery_cp = j;
majianpeng54f89342012-10-31 11:59:10 +11008897 update_time = jiffies;
Shaohua Li29530792016-12-08 15:48:19 -08008898 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
Junxiao Bie1a86db2020-07-14 16:10:26 -07008899 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown97e4f422009-03-31 14:33:13 +11008900 }
NeilBrownacb180b2009-04-14 16:28:34 +10008901
NeilBrownc91abf52013-11-19 12:02:01 +11008902 while (j >= mddev->resync_max &&
8903 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
NeilBrowne62e58a2009-07-01 13:15:35 +10008904 /* As this condition is controlled by user-space,
8905 * we can block indefinitely, so use '_interruptible'
8906 * to avoid triggering warnings.
8907 */
8908 flush_signals(current); /* just in case */
8909 wait_event_interruptible(mddev->recovery_wait,
8910 mddev->resync_max > j
NeilBrownc91abf52013-11-19 12:02:01 +11008911 || test_bit(MD_RECOVERY_INTR,
8912 &mddev->recovery));
NeilBrowne62e58a2009-07-01 13:15:35 +10008913 }
NeilBrownacb180b2009-04-14 16:28:34 +10008914
NeilBrownc91abf52013-11-19 12:02:01 +11008915 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8916 break;
NeilBrownacb180b2009-04-14 16:28:34 +10008917
NeilBrown09314792015-02-19 16:04:40 +11008918 sectors = mddev->pers->sync_request(mddev, j, &skipped);
NeilBrown57afd892005-06-21 17:17:13 -07008919 if (sectors == 0) {
NeilBrowndfc70642008-05-23 13:04:39 -07008920 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownc91abf52013-11-19 12:02:01 +11008921 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008922 }
NeilBrown57afd892005-06-21 17:17:13 -07008923
8924 if (!skipped) { /* actual IO requested */
8925 io_sectors += sectors;
8926 atomic_add(sectors, &mddev->recovery_active);
8927 }
8928
NeilBrowne875ece2011-07-28 11:39:24 +10008929 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8930 break;
8931
Linus Torvalds1da177e2005-04-16 15:20:36 -07008932 j += sectors;
NeilBrown5ed1df22015-07-24 13:27:08 +10008933 if (j > max_sectors)
8934 /* when skipping, extra large numbers can be returned. */
8935 j = max_sectors;
NeilBrown72f36d52012-10-11 14:25:57 +11008936 if (j > 2)
8937 mddev->curr_resync = j;
NeilBrownff4e8d92006-07-10 04:44:16 -07008938 mddev->curr_mark_cnt = io_sectors;
NeilBrownd7603b72006-01-06 00:20:30 -08008939 if (last_check == 0)
NeilBrowne875ece2011-07-28 11:39:24 +10008940 /* this is the earliest that rebuild will be
NeilBrownd7603b72006-01-06 00:20:30 -08008941 * visible in /proc/mdstat
8942 */
8943 md_new_event(mddev);
NeilBrown57afd892005-06-21 17:17:13 -07008944
8945 if (last_check + window > io_sectors || j == max_sectors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008946 continue;
8947
NeilBrown57afd892005-06-21 17:17:13 -07008948 last_check = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008949 repeat:
8950 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8951 /* step marks */
8952 int next = (last_mark+1) % SYNC_MARKS;
8953
8954 mddev->resync_mark = mark[next];
8955 mddev->resync_mark_cnt = mark_cnt[next];
8956 mark[next] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008957 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008958 last_mark = next;
8959 }
8960
NeilBrownc91abf52013-11-19 12:02:01 +11008961 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8962 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008963
8964 /*
8965 * this loop exits only if either when we are slower than
8966 * the 'hard' speed limit, or the system was IO-idle for
8967 * a jiffy.
8968 * the system might be non-idle CPU-wise, but we only care
8969 * about not overloading the IO subsystem. (things like an
8970 * e2fsck being done on the RAID array should execute fast)
8971 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008972 cond_resched();
8973
Xiao Niac7e50a2014-08-07 09:37:41 -04008974 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8975 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
NeilBrown57afd892005-06-21 17:17:13 -07008976 /((jiffies-mddev->resync_mark)/HZ +1) +1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008977
NeilBrown88202a02006-01-06 00:21:36 -08008978 if (currspeed > speed_min(mddev)) {
NeilBrownac8fa412015-02-19 16:55:00 +11008979 if (currspeed > speed_max(mddev)) {
NeilBrownc0e48522005-11-18 01:11:01 -08008980 msleep(500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008981 goto repeat;
8982 }
NeilBrownac8fa412015-02-19 16:55:00 +11008983 if (!is_mddev_idle(mddev, 0)) {
8984 /*
8985 * Give other IO more of a chance.
8986 * The faster the devices, the less we wait.
8987 */
8988 wait_event(mddev->recovery_wait,
8989 !atomic_read(&mddev->recovery_active));
8990 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008991 }
8992 }
NeilBrown9d487392016-11-02 14:16:49 +11008993 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8994 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8995 ? "interrupted" : "done");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008996 /*
8997 * this also signals 'finished resyncing' to md_stop
8998 */
majianpeng7c2c57c2012-07-03 12:12:26 +10008999 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009000 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
9001
NeilBrown5ed1df22015-07-24 13:27:08 +10009002 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9003 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11009004 mddev->curr_resync > 3) {
NeilBrown5ed1df22015-07-24 13:27:08 +10009005 mddev->curr_resync_completed = mddev->curr_resync;
Junxiao Bie1a86db2020-07-14 16:10:26 -07009006 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown5ed1df22015-07-24 13:27:08 +10009007 }
NeilBrown09314792015-02-19 16:04:40 +11009008 mddev->pers->sync_request(mddev, max_sectors, &skipped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009009
NeilBrowndfc70642008-05-23 13:04:39 -07009010 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11009011 mddev->curr_resync > 3) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07009012 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
9013 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9014 if (mddev->curr_resync >= mddev->recovery_cp) {
NeilBrown9d487392016-11-02 14:16:49 +11009015 pr_debug("md: checkpointing %s of %s.\n",
9016 desc, mdname(mddev));
majianpeng0a19caa2012-11-19 19:57:34 +08009017 if (test_bit(MD_RECOVERY_ERROR,
9018 &mddev->recovery))
9019 mddev->recovery_cp =
9020 mddev->curr_resync_completed;
9021 else
9022 mddev->recovery_cp =
9023 mddev->curr_resync;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07009024 }
9025 } else
9026 mddev->recovery_cp = MaxSector;
9027 } else {
9028 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9029 mddev->curr_resync = MaxSector;
NeilBrowndb0505d2017-10-17 16:18:36 +11009030 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9031 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
9032 rcu_read_lock();
9033 rdev_for_each_rcu(rdev, mddev)
9034 if (rdev->raid_disk >= 0 &&
9035 mddev->delta_disks >= 0 &&
9036 !test_bit(Journal, &rdev->flags) &&
9037 !test_bit(Faulty, &rdev->flags) &&
9038 !test_bit(In_sync, &rdev->flags) &&
9039 rdev->recovery_offset < mddev->curr_resync)
9040 rdev->recovery_offset = mddev->curr_resync;
9041 rcu_read_unlock();
9042 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07009043 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009044 }
NeilBrowndb91ff52012-02-07 12:01:51 +11009045 skip:
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009046 /* set CHANGE_PENDING here since maybe another update is needed,
9047 * so other nodes are informed. It should be harmless for normal
9048 * raid */
Shaohua Li29530792016-12-08 15:48:19 -08009049 set_mask_bits(&mddev->sb_flags, 0,
9050 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009051
BingJing Chang88763912018-02-22 13:34:46 +08009052 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9053 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9054 mddev->delta_disks > 0 &&
9055 mddev->pers->finish_reshape &&
9056 mddev->pers->size &&
9057 mddev->queue) {
9058 mddev_lock_nointr(mddev);
9059 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9060 mddev_unlock(mddev);
Christoph Hellwig2c247c52020-11-16 15:57:11 +01009061 if (!mddev_is_clustered(mddev))
9062 set_capacity_and_notify(mddev->gendisk,
9063 mddev->array_sectors);
BingJing Chang88763912018-02-22 13:34:46 +08009064 }
9065
NeilBrown23da4222014-12-15 12:57:01 +11009066 spin_lock(&mddev->lock);
NeilBrownc07b70a2009-12-14 12:49:48 +11009067 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9068 /* We completed so min/max setting can be forgotten if used. */
9069 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9070 mddev->resync_min = 0;
9071 mddev->resync_max = MaxSector;
9072 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9073 mddev->resync_min = mddev->curr_resync_completed;
NeilBrownf7851be2015-07-02 17:12:58 +10009074 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009075 mddev->curr_resync = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009076 spin_unlock(&mddev->lock);
9077
Linus Torvalds1da177e2005-04-16 15:20:36 -07009078 wake_up(&resync_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009079 md_wakeup_thread(mddev->thread);
NeilBrownc6207272008-02-06 01:39:52 -08009080 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009081}
NeilBrown29269552006-03-27 01:18:10 -08009082EXPORT_SYMBOL_GPL(md_do_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009083
NeilBrown746d3202013-04-24 11:42:41 +10009084static int remove_and_add_spares(struct mddev *mddev,
9085 struct md_rdev *this)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009086{
NeilBrown3cb03002011-10-11 16:45:26 +11009087 struct md_rdev *rdev;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009088 int spares = 0;
NeilBrownf2a371c2012-01-09 00:46:41 +11009089 int removed = 0;
NeilBrownd787be42016-06-02 16:19:53 +10009090 bool remove_some = false;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009091
NeilBrown39772f02018-02-03 09:19:30 +11009092 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9093 /* Mustn't remove devices when resync thread is running */
9094 return 0;
9095
NeilBrownd787be42016-06-02 16:19:53 +10009096 rdev_for_each(rdev, mddev) {
NeilBrown746d3202013-04-24 11:42:41 +10009097 if ((this == NULL || rdev == this) &&
9098 rdev->raid_disk >= 0 &&
Dan Williams6bfe0b42008-04-30 00:52:32 -07009099 !test_bit(Blocked, &rdev->flags) &&
NeilBrownd787be42016-06-02 16:19:53 +10009100 test_bit(Faulty, &rdev->flags) &&
9101 atomic_read(&rdev->nr_pending)==0) {
9102 /* Faulty non-Blocked devices with nr_pending == 0
9103 * never get nr_pending incremented,
9104 * never get Faulty cleared, and never get Blocked set.
9105 * So we can synchronize_rcu now rather than once per device
9106 */
9107 remove_some = true;
9108 set_bit(RemoveSynchronized, &rdev->flags);
9109 }
9110 }
9111
9112 if (remove_some)
9113 synchronize_rcu();
9114 rdev_for_each(rdev, mddev) {
9115 if ((this == NULL || rdev == this) &&
9116 rdev->raid_disk >= 0 &&
9117 !test_bit(Blocked, &rdev->flags) &&
9118 ((test_bit(RemoveSynchronized, &rdev->flags) ||
Shaohua Lif2076e72015-10-08 21:54:12 -07009119 (!test_bit(In_sync, &rdev->flags) &&
9120 !test_bit(Journal, &rdev->flags))) &&
NeilBrownd787be42016-06-02 16:19:53 +10009121 atomic_read(&rdev->nr_pending)==0)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009122 if (mddev->pers->hot_remove_disk(
NeilBrownb8321b62011-12-23 10:17:51 +11009123 mddev, rdev) == 0) {
Namhyung Kim36fad852011-07-27 11:00:36 +10009124 sysfs_unlink_rdev(mddev, rdev);
NeilBrown011abdc2018-04-26 14:46:29 +10009125 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009126 rdev->raid_disk = -1;
NeilBrownf2a371c2012-01-09 00:46:41 +11009127 removed++;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009128 }
9129 }
NeilBrownd787be42016-06-02 16:19:53 +10009130 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9131 clear_bit(RemoveSynchronized, &rdev->flags);
9132 }
9133
Jonathan Brassow90584fc2013-03-07 16:24:26 -06009134 if (removed && mddev->kobj.sd)
Junxiao Bie1a86db2020-07-14 16:10:26 -07009135 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009136
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009137 if (this && removed)
NeilBrown746d3202013-04-24 11:42:41 +10009138 goto no_add;
9139
NeilBrowndafb20f2012-03-19 12:46:39 +11009140 rdev_for_each(rdev, mddev) {
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009141 if (this && this != rdev)
9142 continue;
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009143 if (test_bit(Candidate, &rdev->flags))
9144 continue;
NeilBrown7bfec5f2011-12-23 10:17:53 +11009145 if (rdev->raid_disk >= 0 &&
9146 !test_bit(In_sync, &rdev->flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07009147 !test_bit(Journal, &rdev->flags) &&
NeilBrown7bfec5f2011-12-23 10:17:53 +11009148 !test_bit(Faulty, &rdev->flags))
9149 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009150 if (rdev->raid_disk >= 0)
9151 continue;
9152 if (test_bit(Faulty, &rdev->flags))
9153 continue;
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009154 if (!test_bit(Journal, &rdev->flags)) {
9155 if (mddev->ro &&
9156 ! (rdev->saved_raid_disk >= 0 &&
9157 !test_bit(Bitmap_sync, &rdev->flags)))
9158 continue;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009159
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009160 rdev->recovery_offset = 0;
9161 }
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02009162 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09009163 /* failure here is OK */
9164 sysfs_link_rdev(mddev, rdev);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009165 if (!test_bit(Journal, &rdev->flags))
9166 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009167 md_new_event(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08009168 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrowndfc70642008-05-23 13:04:39 -07009169 }
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009170 }
NeilBrown746d3202013-04-24 11:42:41 +10009171no_add:
NeilBrown6dafab62012-09-19 12:54:22 +10009172 if (removed)
Shaohua Li29530792016-12-08 15:48:19 -08009173 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009174 return spares;
9175}
NeilBrown7ebc0be2011-01-14 09:14:33 +11009176
NeilBrownac05f252014-09-30 08:10:42 +10009177static void md_start_sync(struct work_struct *ws)
9178{
9179 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009180
NeilBrownac05f252014-09-30 08:10:42 +10009181 mddev->sync_thread = md_register_thread(md_do_sync,
9182 mddev,
9183 "resync");
9184 if (!mddev->sync_thread) {
NeilBrown9d487392016-11-02 14:16:49 +11009185 pr_warn("%s: could not start resync thread...\n",
9186 mdname(mddev));
NeilBrownac05f252014-09-30 08:10:42 +10009187 /* leave the spares where they are, it shouldn't hurt */
9188 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9189 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9190 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9191 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9192 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009193 wake_up(&resync_wait);
NeilBrownac05f252014-09-30 08:10:42 +10009194 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9195 &mddev->recovery))
9196 if (mddev->sysfs_action)
9197 sysfs_notify_dirent_safe(mddev->sysfs_action);
9198 } else
9199 md_wakeup_thread(mddev->sync_thread);
9200 sysfs_notify_dirent_safe(mddev->sysfs_action);
9201 md_new_event(mddev);
9202}
9203
Linus Torvalds1da177e2005-04-16 15:20:36 -07009204/*
9205 * This routine is regularly called by all per-raid-array threads to
9206 * deal with generic issues like resync and super-block update.
9207 * Raid personalities that don't have a thread (linear/raid0) do not
9208 * need this as they never do any recovery or update the superblock.
9209 *
9210 * It does not do any resync itself, but rather "forks" off other threads
9211 * to do that as needed.
9212 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9213 * "->recovery" and create a thread at ->sync_thread.
NeilBrowndfc70642008-05-23 13:04:39 -07009214 * When the thread finishes it sets MD_RECOVERY_DONE
Linus Torvalds1da177e2005-04-16 15:20:36 -07009215 * and wakeups up this thread which will reap the thread and finish up.
9216 * This thread also removes any faulty devices (with nr_pending == 0).
9217 *
9218 * The overall approach is:
9219 * 1/ if the superblock needs updating, update it.
9220 * 2/ If a recovery thread is running, don't do anything else.
9221 * 3/ If recovery has finished, clean up, possibly marking spares active.
9222 * 4/ If there are any faulty devices, remove them.
9223 * 5/ If array is degraded, try to add spares devices
9224 * 6/ If array has spares or is not in-sync, start a resync thread.
9225 */
NeilBrownfd01b882011-10-11 16:47:53 +11009226void md_check_recovery(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009227{
NeilBrown059421e2018-10-03 15:04:41 +10009228 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9229 /* Write superblock - thread that called mddev_suspend()
9230 * holds reconfig_mutex for us.
9231 */
9232 set_bit(MD_UPDATING_SB, &mddev->flags);
9233 smp_mb__after_atomic();
9234 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9235 md_update_sb(mddev, 0);
9236 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9237 wake_up(&mddev->sb_wait);
9238 }
9239
Jonathan Brassow68866e42011-06-08 15:10:08 +10009240 if (mddev->suspended)
9241 return;
9242
NeilBrown5f404022005-06-21 17:17:16 -07009243 if (mddev->bitmap)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009244 md_bitmap_daemon_work(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009245
NeilBrownfca4d842005-06-21 17:17:11 -07009246 if (signal_pending(current)) {
NeilBrown31a59e32008-04-30 00:52:30 -07009247 if (mddev->pers->sync_request && !mddev->external) {
NeilBrown9d487392016-11-02 14:16:49 +11009248 pr_debug("md: %s in immediate safe mode\n",
9249 mdname(mddev));
NeilBrownfca4d842005-06-21 17:17:11 -07009250 mddev->safemode = 2;
9251 }
9252 flush_signals(current);
9253 }
9254
NeilBrownc89a8ee2008-08-05 15:54:13 +10009255 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9256 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009257 if ( ! (
Shaohua Li29530792016-12-08 15:48:19 -08009258 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07009259 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
NeilBrownfca4d842005-06-21 17:17:11 -07009260 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
NeilBrown31a59e32008-04-30 00:52:30 -07009261 (mddev->external == 0 && mddev->safemode == 1) ||
NeilBrown4ad23a972017-03-15 14:05:14 +11009262 (mddev->safemode == 2
NeilBrownfca4d842005-06-21 17:17:11 -07009263 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009264 ))
9265 return;
NeilBrownfca4d842005-06-21 17:17:11 -07009266
NeilBrowndf5b89b2006-03-27 01:18:20 -08009267 if (mddev_trylock(mddev)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009268 int spares = 0;
NeilBrown480523f2019-08-20 10:21:09 +10009269 bool try_set_sync = mddev->safemode != 0;
NeilBrownfca4d842005-06-21 17:17:11 -07009270
Shaohua Liafc1f552017-08-11 20:34:45 -07009271 if (!mddev->external && mddev->safemode == 1)
NeilBrown33182d12017-08-08 16:56:36 +10009272 mddev->safemode = 0;
9273
NeilBrownc89a8ee2008-08-05 15:54:13 +10009274 if (mddev->ro) {
Neil Brownab16bfc2015-06-17 12:31:46 +10009275 struct md_rdev *rdev;
9276 if (!mddev->external && mddev->in_sync)
9277 /* 'Blocked' flag not needed as failed devices
9278 * will be recorded if array switched to read/write.
9279 * Leaving it set will prevent the device
9280 * from being removed.
9281 */
9282 rdev_for_each(rdev, mddev)
9283 clear_bit(Blocked, &rdev->flags);
NeilBrown7ceb17e2013-04-24 11:42:42 +10009284 /* On a read-only array we can:
9285 * - remove failed devices
9286 * - add already-in_sync devices if the array itself
9287 * is in-sync.
9288 * As we only add devices that are already in-sync,
9289 * we can activate the spares immediately.
NeilBrownc89a8ee2008-08-05 15:54:13 +10009290 */
NeilBrown7ceb17e2013-04-24 11:42:42 +10009291 remove_and_add_spares(mddev, NULL);
NeilBrown8313b8e2013-12-12 10:13:33 +11009292 /* There is no thread, but we need to call
9293 * ->spare_active and clear saved_raid_disk
9294 */
NeilBrown2ac295a2014-05-29 11:40:03 +10009295 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009296 md_reap_sync_thread(mddev);
NeilBrowna4a3d262015-07-17 11:57:30 +10009297 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009298 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Shaohua Li29530792016-12-08 15:48:19 -08009299 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrownc89a8ee2008-08-05 15:54:13 +10009300 goto unlock;
9301 }
9302
Guoqing Jiang659b2542015-12-21 10:50:59 +11009303 if (mddev_is_clustered(mddev)) {
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009304 struct md_rdev *rdev, *tmp;
Guoqing Jiang659b2542015-12-21 10:50:59 +11009305 /* kick the device if another node issued a
9306 * remove disk.
9307 */
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009308 rdev_for_each_safe(rdev, tmp, mddev) {
Guoqing Jiang659b2542015-12-21 10:50:59 +11009309 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9310 rdev->raid_disk < 0)
9311 md_kick_rdev_from_array(rdev);
9312 }
9313 }
9314
NeilBrown480523f2019-08-20 10:21:09 +10009315 if (try_set_sync && !mddev->external && !mddev->in_sync) {
NeilBrown85572d72014-12-15 12:56:56 +11009316 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11009317 set_in_sync(mddev);
NeilBrown85572d72014-12-15 12:56:56 +11009318 spin_unlock(&mddev->lock);
NeilBrownfca4d842005-06-21 17:17:11 -07009319 }
NeilBrownfca4d842005-06-21 17:17:11 -07009320
Shaohua Li29530792016-12-08 15:48:19 -08009321 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07009322 md_update_sb(mddev, 0);
NeilBrown06d91a52005-06-21 17:17:12 -07009323
Linus Torvalds1da177e2005-04-16 15:20:36 -07009324 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9325 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9326 /* resync/recovery still happening */
9327 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9328 goto unlock;
9329 }
9330 if (mddev->sync_thread) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009331 md_reap_sync_thread(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009332 goto unlock;
9333 }
Neil Brown72a23c22008-06-28 08:31:41 +10009334 /* Set RUNNING before clearing NEEDED to avoid
9335 * any transients in the value of "sync_action".
9336 */
NeilBrown72f36d52012-10-11 14:25:57 +11009337 mddev->curr_resync_completed = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009338 spin_lock(&mddev->lock);
Neil Brown72a23c22008-06-28 08:31:41 +10009339 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrown23da4222014-12-15 12:57:01 +11009340 spin_unlock(&mddev->lock);
NeilBrown24dd4692005-11-08 21:39:26 -08009341 /* Clear some bits that don't mean anything, but
9342 * might be left set
9343 */
NeilBrown24dd4692005-11-08 21:39:26 -08009344 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9345 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009346
NeilBrowned209582012-04-24 10:23:14 +10009347 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9348 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrownac05f252014-09-30 08:10:42 +10009349 goto not_running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009350 /* no recovery is running.
9351 * remove any failed drives, then
9352 * add spares if possible.
NeilBrown72f36d52012-10-11 14:25:57 +11009353 * Spares are also removed and re-added, to allow
Linus Torvalds1da177e2005-04-16 15:20:36 -07009354 * the personality to fail the re-add.
9355 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009356
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009357 if (mddev->reshape_position != MaxSector) {
NeilBrown50ac1682009-06-18 08:47:55 +10009358 if (mddev->pers->check_reshape == NULL ||
9359 mddev->pers->check_reshape(mddev) != 0)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009360 /* Cannot proceed */
NeilBrownac05f252014-09-30 08:10:42 +10009361 goto not_running;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009362 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009363 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown746d3202013-04-24 11:42:41 +10009364 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
NeilBrown24dd4692005-11-08 21:39:26 -08009365 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9366 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Dan Williams56ac36d2008-08-07 10:02:47 -07009367 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009368 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009369 } else if (mddev->recovery_cp < MaxSector) {
9370 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009371 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009372 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9373 /* nothing to be done ... */
NeilBrownac05f252014-09-30 08:10:42 +10009374 goto not_running;
NeilBrown24dd4692005-11-08 21:39:26 -08009375
Linus Torvalds1da177e2005-04-16 15:20:36 -07009376 if (mddev->pers->sync_request) {
NeilBrownef99bf42012-05-22 13:55:08 +10009377 if (spares) {
NeilBrowna654b9d82005-06-21 17:17:27 -07009378 /* We are adding a device or devices to an array
9379 * which has the bitmap stored on all devices.
9380 * So make sure all bitmap pages get written
9381 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009382 md_bitmap_write_all(mddev->bitmap);
NeilBrowna654b9d82005-06-21 17:17:27 -07009383 }
NeilBrownac05f252014-09-30 08:10:42 +10009384 INIT_WORK(&mddev->del_work, md_start_sync);
9385 queue_work(md_misc_wq, &mddev->del_work);
9386 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009387 }
NeilBrownac05f252014-09-30 08:10:42 +10009388 not_running:
Neil Brown72a23c22008-06-28 08:31:41 +10009389 if (!mddev->sync_thread) {
9390 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009391 wake_up(&resync_wait);
Neil Brown72a23c22008-06-28 08:31:41 +10009392 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9393 &mddev->recovery))
NeilBrown0c3573f2009-01-09 08:31:05 +11009394 if (mddev->sysfs_action)
NeilBrown00bcb4a2010-06-01 19:37:23 +10009395 sysfs_notify_dirent_safe(mddev->sysfs_action);
Neil Brown72a23c22008-06-28 08:31:41 +10009396 }
NeilBrownac05f252014-09-30 08:10:42 +10009397 unlock:
9398 wake_up(&mddev->sb_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009399 mddev_unlock(mddev);
9400 }
9401}
NeilBrown6c144d32014-09-30 16:15:38 +10009402EXPORT_SYMBOL(md_check_recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009403
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009404void md_reap_sync_thread(struct mddev *mddev)
9405{
9406 struct md_rdev *rdev;
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009407 sector_t old_dev_sectors = mddev->dev_sectors;
9408 bool is_reshaped = false;
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009409
9410 /* resync has finished, collect result */
9411 md_unregister_thread(&mddev->sync_thread);
9412 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
Guoqing Jiang0d8ed0e92019-07-24 11:09:21 +02009413 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9414 mddev->degraded != mddev->raid_disks) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009415 /* success...*/
9416 /* activate any spares */
9417 if (mddev->pers->spare_active(mddev)) {
Junxiao Bie1a86db2020-07-14 16:10:26 -07009418 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Shaohua Li29530792016-12-08 15:48:19 -08009419 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009420 }
9421 }
9422 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009423 mddev->pers->finish_reshape) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009424 mddev->pers->finish_reshape(mddev);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009425 if (mddev_is_clustered(mddev))
9426 is_reshaped = true;
9427 }
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009428
9429 /* If array is no-longer degraded, then any saved_raid_disk
NeilBrownf4667222013-12-09 12:04:56 +11009430 * information must be scrapped.
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009431 */
NeilBrownf4667222013-12-09 12:04:56 +11009432 if (!mddev->degraded)
9433 rdev_for_each(rdev, mddev)
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009434 rdev->saved_raid_disk = -1;
9435
9436 md_update_sb(mddev, 1);
Shaohua Li29530792016-12-08 15:48:19 -08009437 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009438 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9439 * clustered raid */
9440 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9441 md_cluster_ops->resync_finish(mddev);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009442 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownea358cd2015-06-12 20:05:04 +10009443 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009444 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9445 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9446 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9447 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009448 /*
9449 * We call md_cluster_ops->update_size here because sync_size could
9450 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9451 * so it is time to update size across cluster.
9452 */
9453 if (mddev_is_clustered(mddev) && is_reshaped
9454 && !test_bit(MD_CLOSING, &mddev->flags))
9455 md_cluster_ops->update_size(mddev, old_dev_sectors);
NeilBrownf851b602014-12-11 10:02:10 +11009456 wake_up(&resync_wait);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009457 /* flag recovery needed just to double check */
9458 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9459 sysfs_notify_dirent_safe(mddev->sysfs_action);
9460 md_new_event(mddev);
9461 if (mddev->event_work.func)
9462 queue_work(md_misc_wq, &mddev->event_work);
9463}
NeilBrown6c144d32014-09-30 16:15:38 +10009464EXPORT_SYMBOL(md_reap_sync_thread);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009465
NeilBrownfd01b882011-10-11 16:47:53 +11009466void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
Dan Williams6bfe0b42008-04-30 00:52:32 -07009467{
NeilBrown00bcb4a2010-06-01 19:37:23 +10009468 sysfs_notify_dirent_safe(rdev->sysfs_state);
Dan Williams6bfe0b42008-04-30 00:52:32 -07009469 wait_event_timeout(rdev->blocked_wait,
NeilBrownde393cd2011-07-28 11:31:48 +10009470 !test_bit(Blocked, &rdev->flags) &&
9471 !test_bit(BlockedBadBlocks, &rdev->flags),
Dan Williams6bfe0b42008-04-30 00:52:32 -07009472 msecs_to_jiffies(5000));
9473 rdev_dec_pending(rdev, mddev);
9474}
9475EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9476
NeilBrownc6563a82012-05-21 09:27:00 +10009477void md_finish_reshape(struct mddev *mddev)
9478{
9479 /* called be personality module when reshape completes. */
9480 struct md_rdev *rdev;
9481
9482 rdev_for_each(rdev, mddev) {
9483 if (rdev->data_offset > rdev->new_data_offset)
9484 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9485 else
9486 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9487 rdev->data_offset = rdev->new_data_offset;
9488 }
9489}
9490EXPORT_SYMBOL(md_finish_reshape);
NeilBrown2230dfe2011-07-28 11:31:46 +10009491
Vishal Vermafc974ee2015-12-24 19:20:34 -07009492/* Bad block management */
NeilBrown2230dfe2011-07-28 11:31:46 +10009493
Vishal Vermafc974ee2015-12-24 19:20:34 -07009494/* Returns 1 on success, 0 on failure */
NeilBrown3cb03002011-10-11 16:45:26 +11009495int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
NeilBrownc6563a82012-05-21 09:27:00 +10009496 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009497{
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04009498 struct mddev *mddev = rdev->mddev;
NeilBrownc6563a82012-05-21 09:27:00 +10009499 int rv;
9500 if (is_new)
9501 s += rdev->new_data_offset;
9502 else
9503 s += rdev->data_offset;
Vishal Vermafc974ee2015-12-24 19:20:34 -07009504 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9505 if (rv == 0) {
NeilBrown2230dfe2011-07-28 11:31:46 +10009506 /* Make sure they get written out promptly */
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009507 if (test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009508 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
NeilBrown8bd2f0a2011-12-08 16:26:08 +11009509 sysfs_notify_dirent_safe(rdev->sysfs_state);
Shaohua Li29530792016-12-08 15:48:19 -08009510 set_mask_bits(&mddev->sb_flags, 0,
9511 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
NeilBrown2230dfe2011-07-28 11:31:46 +10009512 md_wakeup_thread(rdev->mddev->thread);
Vishal Vermafc974ee2015-12-24 19:20:34 -07009513 return 1;
9514 } else
9515 return 0;
NeilBrown2230dfe2011-07-28 11:31:46 +10009516}
9517EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9518
NeilBrownc6563a82012-05-21 09:27:00 +10009519int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9520 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009521{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009522 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10009523 if (is_new)
9524 s += rdev->new_data_offset;
9525 else
9526 s += rdev->data_offset;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009527 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9528 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009529 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009530 return rv;
NeilBrown2230dfe2011-07-28 11:31:46 +10009531}
9532EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9533
Adrian Bunk75c96f82005-05-05 16:16:09 -07009534static int md_notify_reboot(struct notifier_block *this,
9535 unsigned long code, void *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009536{
9537 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11009538 struct mddev *mddev;
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009539 int need_delay = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009540
NeilBrownc744a652012-03-19 12:46:37 +11009541 for_each_mddev(mddev, tmp) {
9542 if (mddev_trylock(mddev)) {
NeilBrown30b8aa92012-04-24 10:23:16 +10009543 if (mddev->pers)
9544 __md_stop_writes(mddev);
NeilBrown0f62fb22014-05-06 09:36:08 +10009545 if (mddev->persistent)
9546 mddev->safemode = 2;
NeilBrownc744a652012-03-19 12:46:37 +11009547 mddev_unlock(mddev);
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009548 }
NeilBrownc744a652012-03-19 12:46:37 +11009549 need_delay = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009550 }
NeilBrownc744a652012-03-19 12:46:37 +11009551 /*
9552 * certain more exotic SCSI devices are known to be
9553 * volatile wrt too early system reboots. While the
9554 * right place to handle this issue is the given
9555 * driver, we do want to have a safe RAID driver ...
9556 */
9557 if (need_delay)
9558 mdelay(1000*1);
9559
Linus Torvalds1da177e2005-04-16 15:20:36 -07009560 return NOTIFY_DONE;
9561}
9562
Adrian Bunk75c96f82005-05-05 16:16:09 -07009563static struct notifier_block md_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009564 .notifier_call = md_notify_reboot,
9565 .next = NULL,
9566 .priority = INT_MAX, /* before any real devices */
9567};
9568
9569static void md_geninit(void)
9570{
NeilBrown36a4e1f2011-10-07 14:23:17 +11009571 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009572
Alexey Dobriyan97a32532020-02-03 17:37:17 -08009573 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009574}
9575
Adrian Bunk75c96f82005-05-05 16:16:09 -07009576static int __init md_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009577{
Tejun Heoe804ac72010-10-15 15:36:08 +02009578 int ret = -ENOMEM;
9579
Tejun Heoada609e2011-01-25 14:35:54 +01009580 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
Tejun Heoe804ac72010-10-15 15:36:08 +02009581 if (!md_wq)
9582 goto err_wq;
9583
9584 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9585 if (!md_misc_wq)
9586 goto err_misc_wq;
9587
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009588 md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
Guoqing Jiangcf0b9b42020-10-08 05:19:09 +02009589 if (!md_rdev_misc_wq)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009590 goto err_rdev_misc_wq;
9591
Christoph Hellwig28144f92020-10-29 15:58:34 +01009592 ret = __register_blkdev(MD_MAJOR, "md", md_probe);
9593 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009594 goto err_md;
9595
Christoph Hellwig28144f92020-10-29 15:58:34 +01009596 ret = __register_blkdev(0, "mdp", md_probe);
9597 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009598 goto err_mdp;
9599 mdp_major = ret;
9600
Linus Torvalds1da177e2005-04-16 15:20:36 -07009601 register_reboot_notifier(&md_notifier);
Eric W. Biederman0b4d4142007-02-14 00:34:09 -08009602 raid_table_header = register_sysctl_table(raid_root_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009603
9604 md_geninit();
NeilBrownd710e132008-10-13 11:55:12 +11009605 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009606
Tejun Heoe804ac72010-10-15 15:36:08 +02009607err_mdp:
9608 unregister_blkdev(MD_MAJOR, "md");
9609err_md:
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009610 destroy_workqueue(md_rdev_misc_wq);
9611err_rdev_misc_wq:
Tejun Heoe804ac72010-10-15 15:36:08 +02009612 destroy_workqueue(md_misc_wq);
9613err_misc_wq:
9614 destroy_workqueue(md_wq);
9615err_wq:
9616 return ret;
9617}
Linus Torvalds1da177e2005-04-16 15:20:36 -07009618
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009619static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009620{
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009621 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009622 struct md_rdev *rdev2, *tmp;
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009623 int role, ret;
9624 char b[BDEVNAME_SIZE];
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009625
Guoqing Jiang818da592017-03-01 16:42:40 +08009626 /*
9627 * If size is changed in another node then we need to
9628 * do resize as well.
9629 */
9630 if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9631 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9632 if (ret)
9633 pr_info("md-cluster: resize failed\n");
9634 else
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009635 md_bitmap_update_sb(mddev->bitmap);
Guoqing Jiang818da592017-03-01 16:42:40 +08009636 }
9637
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009638 /* Check for change of roles in the active devices */
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009639 rdev_for_each_safe(rdev2, tmp, mddev) {
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009640 if (test_bit(Faulty, &rdev2->flags))
9641 continue;
9642
9643 /* Check if the roles changed */
9644 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009645
9646 if (test_bit(Candidate, &rdev2->flags)) {
9647 if (role == 0xfffe) {
9648 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9649 md_kick_rdev_from_array(rdev2);
9650 continue;
9651 }
9652 else
9653 clear_bit(Candidate, &rdev2->flags);
9654 }
9655
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009656 if (role != rdev2->raid_disk) {
Guoqing Jiangca1e98e2018-10-18 16:37:45 +08009657 /*
9658 * got activated except reshape is happening.
9659 */
9660 if (rdev2->raid_disk == -1 && role != 0xffff &&
9661 !(le32_to_cpu(sb->feature_map) &
9662 MD_FEATURE_RESHAPE_ACTIVE)) {
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009663 rdev2->saved_raid_disk = role;
9664 ret = remove_and_add_spares(mddev, rdev2);
9665 pr_info("Activated spare: %s\n",
NeilBrown9d487392016-11-02 14:16:49 +11009666 bdevname(rdev2->bdev,b));
Guoqing Jianga5781832016-05-02 11:33:14 -04009667 /* wakeup mddev->thread here, so array could
9668 * perform resync with the new activated disk */
9669 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9670 md_wakeup_thread(mddev->thread);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009671 }
9672 /* device faulty
9673 * We just want to do the minimum to mark the disk
9674 * as faulty. The recovery is performed by the
9675 * one who initiated the error.
9676 */
9677 if ((role == 0xfffe) || (role == 0xfffd)) {
9678 md_error(mddev, rdev2);
9679 clear_bit(Blocked, &rdev2->flags);
9680 }
9681 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009682 }
9683
Zhao Heminga8da01f2020-11-19 19:41:33 +08009684 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9685 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9686 if (ret)
9687 pr_warn("md: updating array disks failed. %d\n", ret);
9688 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009689
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009690 /*
9691 * Since mddev->delta_disks has already updated in update_raid_disks,
9692 * so it is time to check reshape.
9693 */
9694 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9695 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9696 /*
9697 * reshape is happening in the remote node, we need to
9698 * update reshape_position and call start_reshape.
9699 */
Christoph Hellwiged4d0a4e2019-04-04 18:56:10 +02009700 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009701 if (mddev->pers->update_reshape_pos)
9702 mddev->pers->update_reshape_pos(mddev);
9703 if (mddev->pers->start_reshape)
9704 mddev->pers->start_reshape(mddev);
9705 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9706 mddev->reshape_position != MaxSector &&
9707 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9708 /* reshape is just done in another node. */
9709 mddev->reshape_position = MaxSector;
9710 if (mddev->pers->update_reshape_pos)
9711 mddev->pers->update_reshape_pos(mddev);
9712 }
9713
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009714 /* Finally set the event to be up to date */
9715 mddev->events = le64_to_cpu(sb->events);
9716}
9717
9718static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9719{
9720 int err;
9721 struct page *swapout = rdev->sb_page;
9722 struct mdp_superblock_1 *sb;
9723
9724 /* Store the sb page of the rdev in the swapout temporary
9725 * variable in case we err in the future
9726 */
9727 rdev->sb_page = NULL;
NeilBrown7f0f0d82016-11-02 14:16:49 +11009728 err = alloc_disk_sb(rdev);
9729 if (err == 0) {
9730 ClearPageUptodate(rdev->sb_page);
9731 rdev->sb_loaded = 0;
9732 err = super_types[mddev->major_version].
9733 load_super(rdev, NULL, mddev->minor_version);
9734 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009735 if (err < 0) {
9736 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9737 __func__, __LINE__, rdev->desc_nr, err);
NeilBrown7f0f0d82016-11-02 14:16:49 +11009738 if (rdev->sb_page)
9739 put_page(rdev->sb_page);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009740 rdev->sb_page = swapout;
9741 rdev->sb_loaded = 1;
9742 return err;
9743 }
9744
9745 sb = page_address(rdev->sb_page);
9746 /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
9747 * is not set
9748 */
9749
9750 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9751 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9752
9753 /* The other node finished recovery, call spare_active to set
9754 * device In_sync and mddev->degraded
9755 */
9756 if (rdev->recovery_offset == MaxSector &&
9757 !test_bit(In_sync, &rdev->flags) &&
9758 mddev->pers->spare_active(mddev))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009759 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009760
9761 put_page(swapout);
9762 return 0;
9763}
9764
9765void md_reload_sb(struct mddev *mddev, int nr)
9766{
9767 struct md_rdev *rdev;
9768 int err;
9769
9770 /* Find the rdev */
9771 rdev_for_each_rcu(rdev, mddev) {
9772 if (rdev->desc_nr == nr)
9773 break;
9774 }
9775
9776 if (!rdev || rdev->desc_nr != nr) {
9777 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9778 return;
9779 }
9780
9781 err = read_rdev(mddev, rdev);
9782 if (err < 0)
9783 return;
9784
9785 check_sb_changes(mddev, rdev);
9786
9787 /* Read all rdev's to update recovery_offset */
Guoqing Jiang0ea99242018-04-09 17:01:21 +08009788 rdev_for_each_rcu(rdev, mddev) {
9789 if (!test_bit(Faulty, &rdev->flags))
9790 read_rdev(mddev, rdev);
9791 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009792}
9793EXPORT_SYMBOL(md_reload_sb);
9794
Linus Torvalds1da177e2005-04-16 15:20:36 -07009795#ifndef MODULE
9796
9797/*
9798 * Searches all registered partitions for autorun RAID arrays
9799 * at boot time.
9800 */
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009801
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009802static DEFINE_MUTEX(detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009803static LIST_HEAD(all_detected_devices);
9804struct detected_devices_node {
9805 struct list_head list;
9806 dev_t dev;
9807};
Linus Torvalds1da177e2005-04-16 15:20:36 -07009808
9809void md_autodetect_dev(dev_t dev)
9810{
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009811 struct detected_devices_node *node_detected_dev;
9812
9813 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9814 if (node_detected_dev) {
9815 node_detected_dev->dev = dev;
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009816 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009817 list_add_tail(&node_detected_dev->list, &all_detected_devices);
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009818 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009819 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009820}
9821
Christoph Hellwigd82fa812020-06-06 15:00:24 +02009822void md_autostart_arrays(int part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009823{
NeilBrown3cb03002011-10-11 16:45:26 +11009824 struct md_rdev *rdev;
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009825 struct detected_devices_node *node_detected_dev;
9826 dev_t dev;
9827 int i_scanned, i_passed;
9828
9829 i_scanned = 0;
9830 i_passed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009831
NeilBrown9d487392016-11-02 14:16:49 +11009832 pr_info("md: Autodetecting RAID arrays.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009833
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009834 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009835 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9836 i_scanned++;
9837 node_detected_dev = list_entry(all_detected_devices.next,
9838 struct detected_devices_node, list);
9839 list_del(&node_detected_dev->list);
9840 dev = node_detected_dev->dev;
9841 kfree(node_detected_dev);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009842 mutex_unlock(&detected_devices_mutex);
NeilBrowndf968c42007-07-17 04:06:11 -07009843 rdev = md_import_device(dev,0, 90);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009844 mutex_lock(&detected_devices_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009845 if (IS_ERR(rdev))
9846 continue;
9847
NeilBrown403df472014-09-30 15:52:29 +10009848 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009849 continue;
NeilBrown403df472014-09-30 15:52:29 +10009850
NeilBrownd0fae182008-03-04 14:29:31 -08009851 set_bit(AutoDetected, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009852 list_add(&rdev->same_set, &pending_raid_disks);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009853 i_passed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009854 }
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009855 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009856
NeilBrown9d487392016-11-02 14:16:49 +11009857 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009858
9859 autorun_devices(part);
9860}
9861
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08009862#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009863
9864static __exit void md_exit(void)
9865{
NeilBrownfd01b882011-10-11 16:47:53 +11009866 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009867 struct list_head *tmp;
NeilBrowne2f23b62014-04-09 14:33:51 +10009868 int delay = 1;
Greg Kroah-Hartman8ab5e4c2005-06-20 21:15:16 -07009869
Christoph Hellwig3dbd8c22009-03-31 14:27:02 +11009870 unregister_blkdev(MD_MAJOR,"md");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009871 unregister_blkdev(mdp_major, "mdp");
9872 unregister_reboot_notifier(&md_notifier);
9873 unregister_sysctl_table(raid_table_header);
NeilBrowne2f23b62014-04-09 14:33:51 +10009874
9875 /* We cannot unload the modules while some process is
9876 * waiting for us in select() or poll() - wake them up
9877 */
9878 md_unloading = 1;
9879 while (waitqueue_active(&md_event_waiters)) {
9880 /* not safe to leave yet */
9881 wake_up(&md_event_waiters);
9882 msleep(delay);
9883 delay += delay;
9884 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009885 remove_proc_entry("mdstat", NULL);
NeilBrowne2f23b62014-04-09 14:33:51 +10009886
NeilBrown29ac4aa2008-02-06 01:39:58 -08009887 for_each_mddev(mddev, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009888 export_array(mddev);
NeilBrown93568632017-02-06 13:41:39 +11009889 mddev->ctime = 0;
NeilBrownd3374822009-01-09 08:31:10 +11009890 mddev->hold_active = 0;
NeilBrown93568632017-02-06 13:41:39 +11009891 /*
9892 * for_each_mddev() will call mddev_put() at the end of each
9893 * iteration. As the mddev is now fully clear, this will
9894 * schedule the mddev for destruction by a workqueue, and the
9895 * destroy_workqueue() below will wait for that to complete.
9896 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009897 }
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009898 destroy_workqueue(md_rdev_misc_wq);
Tejun Heoe804ac72010-10-15 15:36:08 +02009899 destroy_workqueue(md_misc_wq);
9900 destroy_workqueue(md_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009901}
9902
Dan Williams685784a2007-07-09 11:56:42 -07009903subsys_initcall(md_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009904module_exit(md_exit)
9905
Kees Cooke4dca7b2017-10-17 19:04:42 -07009906static int get_ro(char *buffer, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009907{
Xiongfeng Wang3f999802020-05-11 16:23:25 +08009908 return sprintf(buffer, "%d\n", start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009909}
Kees Cooke4dca7b2017-10-17 19:04:42 -07009910static int set_ro(const char *val, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009911{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03009912 return kstrtouint(val, 10, (unsigned int *)&start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009913}
9914
NeilBrown80ca3a42006-07-10 04:44:18 -07009915module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9916module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
NeilBrownefeb53c2009-01-09 08:31:10 +11009917module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
NeilBrown78b63502017-04-12 16:26:13 +10009918module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
NeilBrownf91de922005-11-08 21:39:36 -08009919
Linus Torvalds1da177e2005-04-16 15:20:36 -07009920MODULE_LICENSE("GPL");
NeilBrown0efb9e62009-12-14 12:49:58 +11009921MODULE_DESCRIPTION("MD RAID framework");
NeilBrownaa1595e2005-08-04 12:53:32 -07009922MODULE_ALIAS("md");
NeilBrown72008652005-08-26 18:34:15 -07009923MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);