// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/
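
/*
 * A quick illustration of the convention above, reusing two messages that
 * appear verbatim later in this file: an unreadable superblock is
 * unexpected and therefore pr_err() material, while bad superblock magic
 * is a predictable incompatibility and only rates pr_warn():
 *
 *      pr_err("md: disabled device %s, could not read superblock.\n", b);
 *      pr_warn("md: invalid raid superblock magic on %s\n", b);
 */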
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
                                 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
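
/*
 * Worked example: with a kernel built at HZ == 250, the delay above is
 * (200 * 250) / 1000 + 1 = 51 jiffies, i.e. just over 200 msec; the +1
 * keeps the delay from rounding down to zero jiffies at very low HZ.
 */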
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
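
/*
 * Example (userspace, hedged): widening both limits for a planned rebuild
 * window, through the sysctl files registered in raid_table below:
 *
 *      # echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *      # echo 500000 > /proc/sys/dev/raid/speed_limit_max
 */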
static inline int speed_min(struct mddev *mddev)
{
        return mddev->sync_speed_min ?
                mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
        return mddev->sync_speed_max ?
                mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
        if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
                return;

        kvfree(rdev->serial);
        rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
        /* serial_nums equals BARRIER_BUCKETS_NR */
        int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
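        /*
         * Worked example (assuming common x86-64 values, PAGE_SHIFT == 12
         * and sizeof(atomic_t) == 4): serial_nums = 1 << (12 - 2) = 1024
         * buckets, the same derivation raid1 uses for BARRIER_BUCKETS_NR.
         */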
        struct serial_in_rdev *serial = NULL;

        if (test_bit(CollisionCheck, &rdev->flags))
                return 0;

        serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
                          GFP_KERNEL);
        if (!serial)
                return -ENOMEM;

        for (i = 0; i < serial_nums; i++) {
                struct serial_in_rdev *serial_tmp = &serial[i];

                spin_lock_init(&serial_tmp->serial_lock);
                serial_tmp->serial_rb = RB_ROOT_CACHED;
                init_waitqueue_head(&serial_tmp->serial_io_wait);
        }

        rdev->serial = serial;
        set_bit(CollisionCheck, &rdev->flags);

        return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
        struct md_rdev *rdev;
        int ret = 0;

        rdev_for_each(rdev, mddev) {
                ret = rdev_init_serial(rdev);
                if (ret)
                        break;
        }

        /* Free all resources if the pool was never created */
        if (ret && !mddev->serial_info_pool)
                rdevs_uninit_serial(mddev);

        return ret;
}

/*
 * rdev needs to enable the serialization machinery if it meets both
 * conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
        return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
                rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
                test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resource for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
                              bool is_suspend)
{
        int ret = 0;

        if (rdev && !rdev_need_serial(rdev) &&
            !test_bit(CollisionCheck, &rdev->flags))
                return;

        if (!is_suspend)
                mddev_suspend(mddev);

        if (!rdev)
                ret = rdevs_init_serial(mddev);
        else
                ret = rdev_init_serial(rdev);
        if (ret)
                goto abort;

        if (mddev->serial_info_pool == NULL) {
                /*
                 * already in memalloc noio context by
                 * mddev_suspend()
                 */
                mddev->serial_info_pool =
                        mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
                                                sizeof(struct serial_info));
                if (!mddev->serial_info_pool) {
                        rdevs_uninit_serial(mddev);
                        pr_err("can't alloc memory pool for serialization\n");
                }
        }

abort:
        if (!is_suspend)
                mddev_resume(mddev);
}
/*
 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. when the bitmap is destroyed while the policy is not enabled.
 * 3. for disable policy, the pool is destroyed only when no rdev needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
                               bool is_suspend)
{
        if (rdev && !test_bit(CollisionCheck, &rdev->flags))
                return;

        if (mddev->serial_info_pool) {
                struct md_rdev *temp;
                int num = 0; /* used to track if other rdevs need the pool */

                if (!is_suspend)
                        mddev_suspend(mddev);
                rdev_for_each(temp, mddev) {
                        if (!rdev) {
                                if (!mddev->serialize_policy ||
                                    !rdev_need_serial(temp))
                                        rdev_uninit_serial(temp);
                                else
                                        num++;
                        } else if (temp != rdev &&
                                   test_bit(CollisionCheck, &temp->flags))
                                num++;
                }

                if (rdev)
                        rdev_uninit_serial(rdev);

                if (num)
                        pr_info("The mempool could be used by other devices\n");
                else {
                        mempool_destroy(mddev->serial_info_pool);
                        mddev->serial_info_pool = NULL;
                }
                if (!is_suspend)
                        mddev_resume(mddev);
        }
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
        {
                .procname       = "speed_limit_min",
                .data           = &sysctl_speed_limit_min,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "speed_limit_max",
                .data           = &sysctl_speed_limit_max,
                .maxlen         = sizeof(int),
                .mode           = S_IRUGO|S_IWUSR,
                .proc_handler   = proc_dointvec,
        },
        { }
};

static struct ctl_table raid_dir_table[] = {
        {
                .procname       = "raid",
                .maxlen         = 0,
                .mode           = S_IRUGO|S_IXUGO,
                .child          = raid_table,
        },
        { }
};

static struct ctl_table raid_root_table[] = {
        {
                .procname       = "dev",
                .maxlen         = 0,
                .mode           = 0555,
                .child          = raid_dir_table,
        },
        { }
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it. This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;
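
/*
 * Example (userspace, hedged; the array name is purely illustrative):
 * pre-creating an array node through the module parameter instead of
 * racing on a /dev open:
 *
 *      # echo md_test > /sys/module/md_mod/parameters/new_array
 */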
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(void)
{
        atomic_inc(&md_event_count);
        wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
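
/*
 * Userspace sketch (hedged, not part of this file): a monitor can block
 * until md_new_event() fires by polling an open /proc/mdstat for an
 * exceptional condition:
 *
 *      int fd = open("/proc/mdstat", O_RDONLY);
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *      poll(&pfd, 1, -1);      // returns when the event count changes
 */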

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)                                     \
                                                                        \
        for (({ spin_lock(&all_mddevs_lock);                            \
                _tmp = all_mddevs.next;                                 \
                _mddev = NULL;});                                       \
             ({ if (_tmp != &all_mddevs)                                \
                        mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
                spin_unlock(&all_mddevs_lock);                          \
                if (_mddev) mddev_put(_mddev);                          \
                _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
                _tmp != &all_mddevs;});                                 \
             ({ spin_lock(&all_mddevs_lock);                            \
                _tmp = _tmp->next;})                                    \
                )
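
/*
 * Usage sketch (hypothetical caller): the macro does the locking and
 * refcounting itself, so a body that runs to completion needs no extra
 * bookkeeping; only an early break must mddev_put() the reference it owns:
 *
 *      struct mddev *mddev;
 *      struct list_head *tmp;
 *
 *      for_each_mddev(mddev, tmp)
 *              pr_info("md: found %s\n", mdname(mddev));
 */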
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
        if (mddev->suspended)
                return true;
        if (bio_data_dir(bio) != WRITE)
                return false;
        if (mddev->suspend_lo >= mddev->suspend_hi)
                return false;
        if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
                return false;
        if (bio_end_sector(bio) < mddev->suspend_lo)
                return false;
        return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
        rcu_read_lock();
        if (is_suspended(mddev, bio)) {
                DEFINE_WAIT(__wait);
                for (;;) {
                        prepare_to_wait(&mddev->sb_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (!is_suspended(mddev, bio))
                                break;
                        rcu_read_unlock();
                        schedule();
                        rcu_read_lock();
                }
                finish_wait(&mddev->sb_wait, &__wait);
        }
        atomic_inc(&mddev->active_io);
        rcu_read_unlock();

        if (!mddev->pers->make_request(mddev, bio)) {
                atomic_dec(&mddev->active_io);
                wake_up(&mddev->sb_wait);
                goto check_suspended;
        }

        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
                wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

static void md_submit_bio(struct bio *bio)
{
        const int rw = bio_data_dir(bio);
        struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;

        if (mddev == NULL || mddev->pers == NULL) {
                bio_io_error(bio);
                return;
        }

        if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
                bio_io_error(bio);
                return;
        }

        blk_queue_split(&bio);

        if (mddev->ro == 1 && unlikely(rw == WRITE)) {
                if (bio_sectors(bio) != 0)
                        bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return;
        }

        /* bio could be mergeable after passing to underlayer */
        bio->bi_opf &= ~REQ_NOMERGE;

        md_handle_request(mddev, bio);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
        WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
        lockdep_assert_held(&mddev->reconfig_mutex);
        if (mddev->suspended++)
                return;
        synchronize_rcu();
        wake_up(&mddev->sb_wait);
        set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
        smp_mb__after_atomic();
        wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
        mddev->pers->quiesce(mddev, 1);
        clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
        wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

        del_timer_sync(&mddev->safemode_timer);
        /* restrict memory reclaim I/O while the raid array is suspended */
        mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
        /* leave the memalloc noio scope entered in mddev_suspend() */
        memalloc_noio_restore(mddev->noio_flag);
        lockdep_assert_held(&mddev->reconfig_mutex);
        if (--mddev->suspended)
                return;
        wake_up(&mddev->sb_wait);
        mddev->pers->quiesce(mddev, 0);

        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
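
/*
 * Caller sketch (hedged): the usual pairing around a reconfiguration,
 * with reconfig_mutex held as the lockdep assertions above require:
 *
 *      if (mddev_lock(mddev))
 *              return -EINTR;
 *      mddev_suspend(mddev);
 *      ... change configuration; no new IO can start ...
 *      mddev_resume(mddev);
 *      mddev_unlock(mddev);
 */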

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        rdev_dec_pending(rdev, mddev);

        if (atomic_dec_and_test(&mddev->flush_pending)) {
                /* The pre-request flush has finished */
                queue_work(md_wq, &mddev->flush_work);
        }
        bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct md_rdev *rdev;

        mddev->start_flush = ktime_get_boottime();
        INIT_WORK(&mddev->flush_work, md_submit_flush_data);
        atomic_set(&mddev->flush_pending, 1);
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Faulty, &rdev->flags)) {
                        /* Take two references, one is dropped
                         * when request finishes, one after
                         * we reclaim rcu_read_lock
                         */
                        struct bio *bi;
                        atomic_inc(&rdev->nr_pending);
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
                        bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bio_set_dev(bi, rdev->bdev);
                        bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(bi);
                        rcu_read_lock();
                        rdev_dec_pending(rdev, mddev);
                }
        rcu_read_unlock();
        if (atomic_dec_and_test(&mddev->flush_pending))
                queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, flush_work);
        struct bio *bio = mddev->flush_bio;

        /*
         * flush_bio must be reset before calling into md_handle_request;
         * otherwise bios that already passed the md_handle_request suspend
         * check could wait on this flush, while the md_handle_request call
         * below waits on those bios because of that same suspend check -
         * a deadlock.
         */
        spin_lock_irq(&mddev->lock);
        mddev->prev_flush_start = mddev->start_flush;
        mddev->flush_bio = NULL;
        spin_unlock_irq(&mddev->lock);
        wake_up(&mddev->sb_wait);

        if (bio->bi_iter.bi_size == 0) {
                /* an empty barrier - all done */
                bio_endio(bio);
        } else {
                bio->bi_opf &= ~REQ_PREFLUSH;
                md_handle_request(mddev, bio);
        }
}

/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
 * being finished in another context. Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
        ktime_t req_start = ktime_get_boottime();
        spin_lock_irq(&mddev->lock);
        /* flush requests wait until ongoing flush completes,
         * hence coalescing all the pending requests.
         */
        wait_event_lock_irq(mddev->sb_wait,
                            !mddev->flush_bio ||
                            ktime_before(req_start, mddev->prev_flush_start),
                            mddev->lock);
        /* new request after previous flush is completed */
        if (ktime_after(req_start, mddev->prev_flush_start)) {
                WARN_ON(mddev->flush_bio);
                mddev->flush_bio = bio;
                bio = NULL;
        }
        spin_unlock_irq(&mddev->lock);

        if (!bio) {
                INIT_WORK(&mddev->flush_work, submit_flushes);
                queue_work(md_wq, &mddev->flush_work);
        } else {
                /* flush was performed for some other bio while we waited. */
                if (bio->bi_iter.bi_size == 0)
                        /* an empty barrier - all done */
                        bio_endio(bio);
                else {
                        bio->bi_opf &= ~REQ_PREFLUSH;
                        return false;
                }
        }
        return true;
}
EXPORT_SYMBOL(md_flush_request);
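
/*
 * Caller sketch, mirroring how the raid personalities use this from their
 * make_request paths: hand PREFLUSH bios to md_flush_request() first, and
 * only continue with the data portion when it returns false:
 *
 *      if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *          && md_flush_request(mddev, bio))
 *              return true;
 */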

static inline struct mddev *mddev_get(struct mddev *mddev)
{
        atomic_inc(&mddev->active);
        return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
                list_del_init(&mddev->all_mddevs);

                /*
                 * Call queue_work inside the spinlock so that
                 * flush_workqueue() after mddev_find will succeed in waiting
                 * for the work to be done.
                 */
                INIT_WORK(&mddev->del_work, mddev_delayed_delete);
                queue_work(md_misc_wq, &mddev->del_work);
        }
        spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
        kobject_init(&mddev->kobj, &md_ktype);
        mutex_init(&mddev->open_mutex);
        mutex_init(&mddev->reconfig_mutex);
        mutex_init(&mddev->bitmap_info.mutex);
        INIT_LIST_HEAD(&mddev->disks);
        INIT_LIST_HEAD(&mddev->all_mddevs);
        timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
        atomic_set(&mddev->active, 1);
        atomic_set(&mddev->openers, 0);
        atomic_set(&mddev->active_io, 0);
        spin_lock_init(&mddev->lock);
        atomic_set(&mddev->flush_pending, 0);
        init_waitqueue_head(&mddev->sb_wait);
        init_waitqueue_head(&mddev->recovery_wait);
        mddev->reshape_position = MaxSector;
        mddev->reshape_backwards = 0;
        mddev->last_sync_action = "none";
        mddev->resync_min = 0;
        mddev->resync_max = MaxSector;
        mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find_locked(dev_t unit)
{
        struct mddev *mddev;

        list_for_each_entry(mddev, &all_mddevs, all_mddevs)
                if (mddev->unit == unit)
                        return mddev;

        return NULL;
}

/* find an unused unit number */
static dev_t mddev_alloc_unit(void)
{
        static int next_minor = 512;
        int start = next_minor;
        bool is_free = false;
        dev_t dev = 0;

        while (!is_free) {
                dev = MKDEV(MD_MAJOR, next_minor);
                next_minor++;
                if (next_minor > MINORMASK)
                        next_minor = 0;
                if (next_minor == start)
                        return 0;               /* Oh dear, all in use. */
                is_free = !mddev_find_locked(dev);
        }

        return dev;
}

static struct mddev *mddev_find(dev_t unit)
{
        struct mddev *mddev;

        if (MAJOR(unit) != MD_MAJOR)
                unit &= ~((1 << MdpMinorShift) - 1);

        spin_lock(&all_mddevs_lock);
        mddev = mddev_find_locked(unit);
        if (mddev)
                mddev_get(mddev);
        spin_unlock(&all_mddevs_lock);

        return mddev;
}

static struct mddev *mddev_alloc(dev_t unit)
{
        struct mddev *new;
        int error;

        if (unit && MAJOR(unit) != MD_MAJOR)
                unit &= ~((1 << MdpMinorShift) - 1);

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);
        mddev_init(new);

        spin_lock(&all_mddevs_lock);
        if (unit) {
                error = -EEXIST;
                if (mddev_find_locked(unit))
                        goto out_free_new;
                new->unit = unit;
                if (MAJOR(unit) == MD_MAJOR)
                        new->md_minor = MINOR(unit);
                else
                        new->md_minor = MINOR(unit) >> MdpMinorShift;
                new->hold_active = UNTIL_IOCTL;
        } else {
                error = -ENODEV;
                new->unit = mddev_alloc_unit();
                if (!new->unit)
                        goto out_free_new;
                new->md_minor = MINOR(new->unit);
                new->hold_active = UNTIL_STOP;
        }

        list_add(&new->all_mddevs, &all_mddevs);
        spin_unlock(&all_mddevs_lock);
        return new;
out_free_new:
        spin_unlock(&all_mddevs_lock);
        kfree(new);
        return ERR_PTR(error);
}

static const struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
        if (mddev->to_remove) {
                /* These cannot be removed under reconfig_mutex as
                 * an access to the files will try to take reconfig_mutex
                 * while holding the file unremovable, which leads to
                 * a deadlock.
                 * So we set sysfs_active while the remove is happening,
                 * and anything else which might set ->to_remove or
                 * otherwise change the sysfs namespace will fail with
                 * -EBUSY if sysfs_active is still set.
                 * We set sysfs_active under reconfig_mutex and elsewhere
                 * test it under the same mutex to ensure its correct value
                 * is seen.
                 */
                const struct attribute_group *to_remove = mddev->to_remove;
                mddev->to_remove = NULL;
                mddev->sysfs_active = 1;
                mutex_unlock(&mddev->reconfig_mutex);

                if (mddev->kobj.sd) {
                        if (to_remove != &md_redundancy_group)
                                sysfs_remove_group(&mddev->kobj, to_remove);
                        if (mddev->pers == NULL ||
                            mddev->pers->sync_request == NULL) {
                                sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
                                if (mddev->sysfs_action)
                                        sysfs_put(mddev->sysfs_action);
                                if (mddev->sysfs_completed)
                                        sysfs_put(mddev->sysfs_completed);
                                if (mddev->sysfs_degraded)
                                        sysfs_put(mddev->sysfs_degraded);
                                mddev->sysfs_action = NULL;
                                mddev->sysfs_completed = NULL;
                                mddev->sysfs_degraded = NULL;
                        }
                }
                mddev->sysfs_active = 0;
        } else
                mutex_unlock(&mddev->reconfig_mutex);

        /* As we've dropped the mutex we need a spinlock to
         * make sure the thread doesn't disappear
         */
        spin_lock(&pers_lock);
        md_wakeup_thread(mddev->thread);
        wake_up(&mddev->sb_wait);
        spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->desc_nr == nr)
                        return rdev;

        return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;

        rdev_for_each_rcu(rdev, mddev)
                if (rdev->bdev->bd_dev == dev)
                        return rdev;

        return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
        struct md_personality *pers;
        list_for_each_entry(pers, &pers_list, list) {
                if (level != LEVEL_NONE && pers->level == level)
                        return pers;
                if (strcmp(pers->name, clevel)==0)
                        return pers;
        }
        return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
        return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
}
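
/*
 * Worked example (hedged, using the 0.90 format constants from md_p.h,
 * where MD_RESERVED_SECTORS is 128): for a 1000000-sector device this
 * rounds down to a 64KiB boundary and steps back one reservation:
 * (1000000 & ~127) - 128 = 999936 - 128 = 999808.
 */
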
static int alloc_disk_sb(struct md_rdev *rdev)
{
        rdev->sb_page = alloc_page(GFP_KERNEL);
        if (!rdev->sb_page)
                return -ENOMEM;
        return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
        if (rdev->sb_page) {
                put_page(rdev->sb_page);
                rdev->sb_loaded = 0;
                rdev->sb_page = NULL;
                rdev->sb_start = 0;
                rdev->sectors = 0;
        }
        if (rdev->bb_page) {
                put_page(rdev->bb_page);
                rdev->bb_page = NULL;
        }
        badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;

        if (bio->bi_status) {
                pr_err("md: %s gets error=%d\n", __func__,
                       blk_status_to_errno(bio->bi_status));
                md_error(mddev, rdev);
                if (!test_bit(Faulty, &rdev->flags)
                    && (bio->bi_opf & MD_FAILFAST)) {
                        set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
                        set_bit(LastDev, &rdev->flags);
                }
        } else
                clear_bit(LastDev, &rdev->flags);

        if (atomic_dec_and_test(&mddev->pending_writes))
                wake_up(&mddev->sb_wait);
        rdev_dec_pending(rdev, mddev);
        bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                    sector_t sector, int size, struct page *page)
{
        /* write first size bytes of page to sector of rdev
         * Increment mddev->pending_writes before returning
         * and decrement it on completion, waking up sb_wait
         * if zero is reached.
         * If an error occurred, call md_error
         */
        struct bio *bio;
        int ff = 0;

        if (!page)
                return;

        if (test_bit(Faulty, &rdev->flags))
                return;

        bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);

        atomic_inc(&rdev->nr_pending);

        bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;

        if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
            test_bit(FailFast, &rdev->flags) &&
            !test_bit(LastDev, &rdev->flags))
                ff = MD_FAILFAST;
        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

        atomic_inc(&mddev->pending_writes);
        submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
        /* wait for all superblock writes that were scheduled to complete */
        wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
        if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
                return -EAGAIN;
        return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
                 struct page *page, int op, int op_flags, bool metadata_op)
{
        struct bio bio;
        struct bio_vec bvec;

        bio_init(&bio, &bvec, 1);

        if (metadata_op && rdev->meta_bdev)
                bio_set_dev(&bio, rdev->meta_bdev);
        else
                bio_set_dev(&bio, rdev->bdev);
        bio.bi_opf = op | op_flags;
        if (metadata_op)
                bio.bi_iter.bi_sector = sector + rdev->sb_start;
        else if (rdev->mddev->reshape_position != MaxSector &&
                 (rdev->mddev->reshape_backwards ==
                  (sector >= rdev->mddev->reshape_position)))
                bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
        else
                bio.bi_iter.bi_sector = sector + rdev->data_offset;
        bio_add_page(&bio, page, size, 0);

        submit_bio_wait(&bio);

        return !bio.bi_status;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
        char b[BDEVNAME_SIZE];

        if (rdev->sb_loaded)
                return 0;

        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
                goto fail;
        rdev->sb_loaded = 1;
        return 0;

fail:
        pr_err("md: disabled device %s, could not read superblock.\n",
               bdevname(rdev->bdev,b));
        return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        return  sb1->set_uuid0 == sb2->set_uuid0 &&
                sb1->set_uuid1 == sb2->set_uuid1 &&
                sb1->set_uuid2 == sb2->set_uuid2 &&
                sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
        int ret;
        mdp_super_t *tmp1, *tmp2;

        tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
        tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

        if (!tmp1 || !tmp2) {
                ret = 0;
                goto abort;
        }

        *tmp1 = *sb1;
        *tmp2 = *sb2;

        /*
         * nr_disks is not constant
         */
        tmp1->nr_disks = 0;
        tmp2->nr_disks = 0;

        ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
        kfree(tmp1);
        kfree(tmp2);
        return ret;
}

static u32 md_csum_fold(u32 csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        return (csum & 0xffff) + (csum >> 16);
}
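
/*
 * Worked example: folding 0xffff0001 gives 0x0001 + 0xffff = 0x10000 on
 * the first pass and 0x0000 + 0x0001 = 0x0001 on the second; two passes
 * are needed because the first addition can itself carry past bit 16.
 */
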
1078
NeilBrownf72ffdd2014-09-30 14:23:59 +10001079static unsigned int calc_sb_csum(mdp_super_t *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080{
NeilBrown4d167f02007-05-09 02:35:37 -07001081 u64 newcsum = 0;
1082 u32 *sb32 = (u32*)sb;
1083 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 unsigned int disk_csum, csum;
1085
1086 disk_csum = sb->sb_csum;
1087 sb->sb_csum = 0;
NeilBrown4d167f02007-05-09 02:35:37 -07001088
1089 for (i = 0; i < MD_SB_BYTES/4 ; i++)
1090 newcsum += sb32[i];
1091 csum = (newcsum & 0xffffffff) + (newcsum>>32);
1092
NeilBrown4d167f02007-05-09 02:35:37 -07001093#ifdef CONFIG_ALPHA
1094 /* This used to use csum_partial, which was wrong for several
1095 * reasons including that different results are returned on
1096 * different architectures. It isn't critical that we get exactly
1097 * the same return value as before (we always csum_fold before
1098 * testing, and that removes any differences). However as we
1099 * know that csum_partial always returned a 16bit value on
1100 * alphas, do a fold to maximise conformity to previous behaviour.
1101 */
1102 sb->sb_csum = md_csum_fold(disk_csum);
1103#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001104 sb->sb_csum = disk_csum;
NeilBrown4d167f02007-05-09 02:35:37 -07001105#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106 return csum;
1107}
1108
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109/*
1110 * Handle superblock details.
1111 * We want to be able to handle multiple superblock formats
1112 * so we have a common interface to them all, and an array of
1113 * different handlers.
1114 * We rely on user-space to write the initial superblock, and support
1115 * reading and updating of superblocks.
1116 * Interface methods are:
NeilBrown3cb03002011-10-11 16:45:26 +11001117 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 * loads and validates a superblock on dev.
1119 * if refdev != NULL, compare superblocks on both devices
1120 * Return:
1121 * 0 - dev has a superblock that is compatible with refdev
1122 * 1 - dev has a superblock that is compatible and newer than refdev
1123 * so dev should be used as the refdev in future
1124 * -EINVAL superblock incompatible or invalid
1125 * -othererror e.g. -EIO
1126 *
NeilBrownfd01b882011-10-11 16:47:53 +11001127 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 * Verify that dev is acceptable into mddev.
1129 * The first time, mddev->raid_disks will be 0, and data from
1130 * dev should be merged in. Subsequent calls check that dev
1131 * is new enough. Return 0 or -EINVAL
1132 *
NeilBrownfd01b882011-10-11 16:47:53 +11001133 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 * Update the superblock for rdev with data in mddev
1135 * This does not write to disc.
1136 *
1137 */
1138
1139struct super_type {
Chris Webb0cd17fe2008-06-28 08:31:46 +10001140 char *name;
1141 struct module *owner;
NeilBrownc6563a82012-05-21 09:27:00 +10001142 int (*load_super)(struct md_rdev *rdev,
1143 struct md_rdev *refdev,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001144 int minor_version);
NeilBrownc6563a82012-05-21 09:27:00 +10001145 int (*validate_super)(struct mddev *mddev,
1146 struct md_rdev *rdev);
1147 void (*sync_super)(struct mddev *mddev,
1148 struct md_rdev *rdev);
NeilBrown3cb03002011-10-11 16:45:26 +11001149 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
Andre Noll15f4a5f2008-07-21 14:42:12 +10001150 sector_t num_sectors);
NeilBrownc6563a82012-05-21 09:27:00 +10001151 int (*allow_new_offset)(struct md_rdev *rdev,
1152 unsigned long long new_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153};
1154
1155/*
Andre Noll0894cc32009-06-18 08:49:23 +10001156 * Check that the given mddev has no bitmap.
1157 *
1158 * This function is called from the run method of all personalities that do not
1159 * support bitmaps. It prints an error message and returns non-zero if mddev
1160 * has a bitmap. Otherwise, it returns 0.
1161 *
1162 */
NeilBrownfd01b882011-10-11 16:47:53 +11001163int md_check_no_bitmap(struct mddev *mddev)
Andre Noll0894cc32009-06-18 08:49:23 +10001164{
NeilBrownc3d97142009-12-14 12:49:52 +11001165 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
Andre Noll0894cc32009-06-18 08:49:23 +10001166 return 0;
NeilBrown9d487392016-11-02 14:16:49 +11001167 pr_warn("%s: bitmaps are not supported for %s\n",
Andre Noll0894cc32009-06-18 08:49:23 +10001168 mdname(mddev), mddev->pers->name);
1169 return 1;
1170}
1171EXPORT_SYMBOL(md_check_no_bitmap);
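/*
 * Typical use, sketched from a personality's run method (raid0
 * behaves like this):
 *
 *	static int run(struct mddev *mddev)
 *	{
 *		if (md_check_no_bitmap(mddev))
 *			return -EINVAL;
 *		...
 *	}
 */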
1172
1173/*
NeilBrownf72ffdd2014-09-30 14:23:59 +10001174 * load_super for 0.90.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175 */
NeilBrown3cb03002011-10-11 16:45:26 +11001176static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177{
1178 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1179 mdp_super_t *sb;
1180 int ret;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001181 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182
1183 /*
Andre Noll0f420352008-07-11 22:02:23 +10001184 * Calculate the position of the superblock (512-byte sectors),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185 * it's at the end of the disk.
1186 *
 1187 * It also happens to be a multiple of 4KB.
1188 */
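	/*
	 * Worked example, assuming calc_dev_sboffset() rounds the device
	 * size down to a 64K (128-sector) boundary and steps back one
	 * 64K block: a 1000000-sector device rounds to 999936, giving
	 * sb_start = 999808, i.e. the last aligned 64K of the disk.
	 */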
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001189 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190
NeilBrown0002b272005-09-09 16:23:53 -07001191 ret = read_disk_sb(rdev, MD_SB_BYTES);
NeilBrown9d487392016-11-02 14:16:49 +11001192 if (ret)
1193 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
1195 ret = -EINVAL;
1196
1197 bdevname(rdev->bdev, b);
Namhyung Kim65a06f062011-07-27 11:00:36 +10001198 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199
1200 if (sb->md_magic != MD_SB_MAGIC) {
NeilBrown9d487392016-11-02 14:16:49 +11001201 pr_warn("md: invalid raid superblock magic on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 goto abort;
1203 }
1204
1205 if (sb->major_version != 0 ||
NeilBrownf6705572006-03-27 01:18:11 -08001206 sb->minor_version < 90 ||
1207 sb->minor_version > 91) {
NeilBrown9d487392016-11-02 14:16:49 +11001208 pr_warn("Bad version number %d.%d on %s\n",
1209 sb->major_version, sb->minor_version, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 goto abort;
1211 }
1212
1213 if (sb->raid_disks <= 0)
1214 goto abort;
1215
NeilBrown4d167f02007-05-09 02:35:37 -07001216 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
NeilBrown9d487392016-11-02 14:16:49 +11001217 pr_warn("md: invalid superblock checksum on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218 goto abort;
1219 }
1220
1221 rdev->preferred_minor = sb->md_minor;
1222 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10001223 rdev->new_data_offset = 0;
NeilBrown0002b272005-09-09 16:23:53 -07001224 rdev->sb_size = MD_SB_BYTES;
NeilBrown9f2f3832011-07-28 11:31:47 +10001225 rdev->badblocks.shift = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
1227 if (sb->level == LEVEL_MULTIPATH)
1228 rdev->desc_nr = -1;
1229 else
1230 rdev->desc_nr = sb->this_disk.number;
1231
Yufen Yu228fc7d2019-10-30 18:47:02 +08001232 /* not spare disk, or LEVEL_MULTIPATH */
1233 if (sb->level == LEVEL_MULTIPATH ||
1234 (rdev->desc_nr >= 0 &&
Yufen Yu3b7436c2019-12-10 15:01:29 +08001235 rdev->desc_nr < MD_SB_DISKS &&
Yufen Yu228fc7d2019-10-30 18:47:02 +08001236 sb->disks[rdev->desc_nr].state &
1237 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1238 spare_disk = false;
1239
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001240 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001241 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001242 ret = 1;
1243 else
1244 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001245 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001247 mdp_super_t *refsb = page_address(refdev->sb_page);
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001248 if (!md_uuid_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001249 pr_warn("md: %s has different UUID to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 b, bdevname(refdev->bdev,b2));
1251 goto abort;
1252 }
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001253 if (!md_sb_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001254 pr_warn("md: %s has same UUID but different superblock to %s\n",
1255 b, bdevname(refdev->bdev, b2));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 goto abort;
1257 }
1258 ev1 = md_event(sb);
1259 ev2 = md_event(refsb);
Yufen Yu6a5cb532019-10-16 16:00:03 +08001260
Yufen Yu228fc7d2019-10-30 18:47:02 +08001261 if (!spare_disk && ev1 > ev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 ret = 1;
NeilBrownf72ffdd2014-09-30 14:23:59 +10001263 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 ret = 0;
1265 }
NeilBrown8190e752009-06-18 08:48:58 +10001266 rdev->sectors = rdev->sb_start;
NeilBrown667a5312012-08-16 16:46:12 +10001267 /* Limit to 4TB as metadata cannot record more than that.
1268 * (not needed for Linear and RAID0 as metadata doesn't
1269 * record this size)
1270 */
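	/* 2^32 KB == 4TiB == (2ULL << 32) 512-byte sectors; clamping two
	 * sectors below that keeps the size representable in the 32-bit
	 * KB fields of the v0.90 superblock.
	 */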
Christoph Hellwig72deb452019-04-05 18:08:59 +02001271 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001272 rdev->sectors = (sector_t)(2ULL << 32) - 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273
NeilBrown27a7b262011-09-10 17:21:28 +10001274 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
NeilBrown2bf071b2006-01-06 00:20:55 -08001275 /* "this cannot possibly happen" ... */
1276 ret = -EINVAL;
1277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 abort:
1279 return ret;
1280}
1281
1282/*
1283 * validate_super for 0.90.0
1284 */
NeilBrownfd01b882011-10-11 16:47:53 +11001285static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286{
1287 mdp_disk_t *desc;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001288 mdp_super_t *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001289 __u64 ev1 = md_event(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290
NeilBrown41158c72005-06-21 17:17:25 -07001291 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001292 clear_bit(Faulty, &rdev->flags);
1293 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001294 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001295 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001296
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 if (mddev->raid_disks == 0) {
1298 mddev->major_version = 0;
1299 mddev->minor_version = sb->minor_version;
1300 mddev->patch_version = sb->patch_version;
NeilBrowne6910632008-02-06 01:39:51 -08001301 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001302 mddev->chunk_sectors = sb->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 mddev->ctime = sb->ctime;
1304 mddev->utime = sb->utime;
1305 mddev->level = sb->level;
NeilBrownd9d166c2006-01-06 00:20:51 -08001306 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 mddev->layout = sb->layout;
1308 mddev->raid_disks = sb->raid_disks;
NeilBrown27a7b262011-09-10 17:21:28 +10001309 mddev->dev_sectors = ((sector_t)sb->size) * 2;
NeilBrown07d84d102006-06-26 00:27:56 -07001310 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001311 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001312 mddev->bitmap_info.space = 0;
 1313 /* bitmap can use 60K after the 4K superblock */
NeilBrownc3d97142009-12-14 12:49:52 +11001314 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001315 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrown2c810cd2012-05-21 09:27:00 +10001316 mddev->reshape_backwards = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
NeilBrownf6705572006-03-27 01:18:11 -08001318 if (mddev->minor_version >= 91) {
1319 mddev->reshape_position = sb->reshape_position;
1320 mddev->delta_disks = sb->delta_disks;
1321 mddev->new_level = sb->new_level;
1322 mddev->new_layout = sb->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001323 mddev->new_chunk_sectors = sb->new_chunk >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001324 if (mddev->delta_disks < 0)
1325 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001326 } else {
1327 mddev->reshape_position = MaxSector;
1328 mddev->delta_disks = 0;
1329 mddev->new_level = mddev->level;
1330 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001331 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001332 }
NeilBrown33f2c352019-09-09 16:52:29 +10001333 if (mddev->level == 0)
1334 mddev->layout = -1;
NeilBrownf6705572006-03-27 01:18:11 -08001335
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336 if (sb->state & (1<<MD_SB_CLEAN))
1337 mddev->recovery_cp = MaxSector;
1338 else {
NeilBrownf72ffdd2014-09-30 14:23:59 +10001339 if (sb->events_hi == sb->cp_events_hi &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 sb->events_lo == sb->cp_events_lo) {
1341 mddev->recovery_cp = sb->recovery_cp;
1342 } else
1343 mddev->recovery_cp = 0;
1344 }
1345
1346 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1347 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1348 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1349 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1350
1351 mddev->max_disks = MD_SB_DISKS;
NeilBrowna654b9d82005-06-21 17:17:27 -07001352
1353 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001354 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001355 mddev->bitmap_info.offset =
1356 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10001357 mddev->bitmap_info.space =
Dave Jonesc9ad0202013-08-19 22:26:32 -04001358 mddev->bitmap_info.default_space;
NeilBrown6409bb02012-05-22 13:55:07 +10001359 }
NeilBrowna654b9d82005-06-21 17:17:27 -07001360
NeilBrown41158c72005-06-21 17:17:25 -07001361 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001362 /* Insist on good event counter while assembling, except
1363 * for spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001365 if (sb->disks[rdev->desc_nr].state & (
1366 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
NeilBrownf72ffdd2014-09-30 14:23:59 +10001367 if (ev1 < mddev->events)
NeilBrownbe6800a2010-05-18 10:17:09 +10001368 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001369 } else if (mddev->bitmap) {
 1370 /* if adding to an array with a bitmap, then we can accept an
1371 * older device ... but not too old.
1372 */
NeilBrown41158c72005-06-21 17:17:25 -07001373 if (ev1 < mddev->bitmap->events_cleared)
1374 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001375 if (ev1 < mddev->events)
1376 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001377 } else {
1378 if (ev1 < mddev->events)
1379 /* just a hot-add of a new device, leave raid_disk at -1 */
1380 return 0;
1381 }
NeilBrown41158c72005-06-21 17:17:25 -07001382
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 if (mddev->level != LEVEL_MULTIPATH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 desc = sb->disks + rdev->desc_nr;
1385
1386 if (desc->state & (1<<MD_DISK_FAULTY))
NeilBrownb2d444d2005-11-08 21:39:31 -08001387 set_bit(Faulty, &rdev->flags);
NeilBrown7c7546c2006-06-26 00:27:41 -07001388 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1389 desc->raid_disk < mddev->raid_disks */) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001390 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 rdev->raid_disk = desc->raid_disk;
NeilBrownf4667222013-12-09 12:04:56 +11001392 rdev->saved_raid_disk = desc->raid_disk;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001393 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1394 /* active but not in sync implies recovery up to
1395 * reshape position. We don't know exactly where
1396 * that is, so set to zero for now */
1397 if (mddev->minor_version >= 91) {
1398 rdev->recovery_offset = 0;
1399 rdev->raid_disk = desc->raid_disk;
1400 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001402 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1403 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001404 if (desc->state & (1<<MD_DISK_FAILFAST))
1405 set_bit(FailFast, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001406 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001407 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 return 0;
1409}
1410
1411/*
1412 * sync_super for 0.90.0
1413 */
NeilBrownfd01b882011-10-11 16:47:53 +11001414static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415{
1416 mdp_super_t *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001417 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 int next_spare = mddev->raid_disks;
NeilBrown19133a42005-11-08 21:39:35 -08001419
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420 /* make rdev->sb match mddev data:
1421 *
1422 * 1/ zero out disks
1423 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1424 * 3/ any empty disks < next_spare become removed
1425 *
1426 * disks[0] gets initialised to REMOVED because
1427 * we cannot be sure from other fields if it has
1428 * been initialised or not.
1429 */
1430 int i;
1431 int active=0, working=0,failed=0,spare=0,nr_disks=0;
1432
NeilBrown61181562005-09-09 16:24:02 -07001433 rdev->sb_size = MD_SB_BYTES;
1434
Namhyung Kim65a06f062011-07-27 11:00:36 +10001435 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436
1437 memset(sb, 0, sizeof(*sb));
1438
1439 sb->md_magic = MD_SB_MAGIC;
1440 sb->major_version = mddev->major_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 sb->patch_version = mddev->patch_version;
1442 sb->gvalid_words = 0; /* ignored */
1443 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1444 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1445 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1446 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1447
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001448 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 sb->level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11001450 sb->size = mddev->dev_sectors / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 sb->raid_disks = mddev->raid_disks;
1452 sb->md_minor = mddev->md_minor;
NeilBrowne6910632008-02-06 01:39:51 -08001453 sb->not_persistent = 0;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001454 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 sb->state = 0;
1456 sb->events_hi = (mddev->events>>32);
1457 sb->events_lo = (u32)mddev->events;
1458
NeilBrownf6705572006-03-27 01:18:11 -08001459 if (mddev->reshape_position == MaxSector)
1460 sb->minor_version = 90;
1461 else {
1462 sb->minor_version = 91;
1463 sb->reshape_position = mddev->reshape_position;
1464 sb->new_level = mddev->new_level;
1465 sb->delta_disks = mddev->delta_disks;
1466 sb->new_layout = mddev->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001467 sb->new_chunk = mddev->new_chunk_sectors << 9;
NeilBrownf6705572006-03-27 01:18:11 -08001468 }
1469 mddev->minor_version = sb->minor_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 if (mddev->in_sync)
1471 {
1472 sb->recovery_cp = mddev->recovery_cp;
1473 sb->cp_events_hi = (mddev->events>>32);
1474 sb->cp_events_lo = (u32)mddev->events;
1475 if (mddev->recovery_cp == MaxSector)
1476 sb->state = (1<< MD_SB_CLEAN);
1477 } else
1478 sb->recovery_cp = 0;
1479
1480 sb->layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10001481 sb->chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482
NeilBrownc3d97142009-12-14 12:49:52 +11001483 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
NeilBrowna654b9d82005-06-21 17:17:27 -07001484 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1485
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 sb->disks[0].state = (1<<MD_DISK_REMOVED);
NeilBrowndafb20f2012-03-19 12:46:39 +11001487 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 mdp_disk_t *d;
NeilBrown86e6ffd2005-11-08 21:39:24 -08001489 int desc_nr;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001490 int is_active = test_bit(In_sync, &rdev2->flags);
1491
1492 if (rdev2->raid_disk >= 0 &&
1493 sb->minor_version >= 91)
1494 /* we have nowhere to store the recovery_offset,
1495 * but if it is not below the reshape_position,
1496 * we can piggy-back on that.
1497 */
1498 is_active = 1;
1499 if (rdev2->raid_disk < 0 ||
1500 test_bit(Faulty, &rdev2->flags))
1501 is_active = 0;
1502 if (is_active)
NeilBrown86e6ffd2005-11-08 21:39:24 -08001503 desc_nr = rdev2->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 else
NeilBrown86e6ffd2005-11-08 21:39:24 -08001505 desc_nr = next_spare++;
NeilBrown19133a42005-11-08 21:39:35 -08001506 rdev2->desc_nr = desc_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 d = &sb->disks[rdev2->desc_nr];
1508 nr_disks++;
1509 d->number = rdev2->desc_nr;
1510 d->major = MAJOR(rdev2->bdev->bd_dev);
1511 d->minor = MINOR(rdev2->bdev->bd_dev);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001512 if (is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 d->raid_disk = rdev2->raid_disk;
1514 else
1515 d->raid_disk = rdev2->desc_nr; /* compatibility */
NeilBrown1be78922006-03-27 01:18:03 -08001516 if (test_bit(Faulty, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 d->state = (1<<MD_DISK_FAULTY);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001518 else if (is_active) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 d->state = (1<<MD_DISK_ACTIVE);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001520 if (test_bit(In_sync, &rdev2->flags))
1521 d->state |= (1<<MD_DISK_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522 active++;
1523 working++;
1524 } else {
1525 d->state = 0;
1526 spare++;
1527 working++;
1528 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001529 if (test_bit(WriteMostly, &rdev2->flags))
1530 d->state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11001531 if (test_bit(FailFast, &rdev2->flags))
1532 d->state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 /* now set the "removed" and "faulty" bits on any missing devices */
1535 for (i=0 ; i < mddev->raid_disks ; i++) {
1536 mdp_disk_t *d = &sb->disks[i];
1537 if (d->state == 0 && d->number == 0) {
1538 d->number = i;
1539 d->raid_disk = i;
1540 d->state = (1<<MD_DISK_REMOVED);
1541 d->state |= (1<<MD_DISK_FAULTY);
1542 failed++;
1543 }
1544 }
1545 sb->nr_disks = nr_disks;
1546 sb->active_disks = active;
1547 sb->working_disks = working;
1548 sb->failed_disks = failed;
1549 sb->spare_disks = spare;
1550
1551 sb->this_disk = sb->disks[rdev->desc_nr];
1552 sb->sb_csum = calc_sb_csum(sb);
1553}
1554
1555/*
Chris Webb0cd17fe2008-06-28 08:31:46 +10001556 * rdev_size_change for 0.90.0
1557 */
1558static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11001559super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001560{
Andre Noll58c0fed2009-03-31 14:33:13 +11001561 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001562 return 0; /* component must fit device */
NeilBrownc3d97142009-12-14 12:49:52 +11001563 if (rdev->mddev->bitmap_info.offset)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001564 return 0; /* can't move bitmap */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001565 rdev->sb_start = calc_dev_sboffset(rdev);
Andre Noll15f4a5f2008-07-21 14:42:12 +10001566 if (!num_sectors || num_sectors > rdev->sb_start)
1567 num_sectors = rdev->sb_start;
NeilBrown27a7b262011-09-10 17:21:28 +10001568 /* Limit to 4TB as metadata cannot record more than that.
1569 * 4TB == 2^32 KB, or 2*2^32 sectors.
1570 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001571 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001572 num_sectors = (sector_t)(2ULL << 32) - 2;
NeilBrown46533ff2016-11-18 16:16:11 +11001573 do {
1574 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001575 rdev->sb_page);
NeilBrown46533ff2016-11-18 16:16:11 +11001576 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11001577 return num_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10001578}
1579
NeilBrownc6563a82012-05-21 09:27:00 +10001580static int
1581super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1582{
1583 /* non-zero offset changes not possible with v0.90 */
1584 return new_offset == 0;
1585}
Chris Webb0cd17fe2008-06-28 08:31:46 +10001586
1587/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 * version 1 superblock
1589 */
1590
NeilBrownf72ffdd2014-09-30 14:23:59 +10001591static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592{
NeilBrown1c05b4b2006-10-21 10:24:08 -07001593 __le32 disk_csum;
1594 u32 csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 unsigned long long newcsum;
1596 int size = 256 + le32_to_cpu(sb->max_dev)*2;
NeilBrown1c05b4b2006-10-21 10:24:08 -07001597 __le32 *isuper = (__le32*)sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599 disk_csum = sb->sb_csum;
1600 sb->sb_csum = 0;
1601 newcsum = 0;
NeilBrown1f3c9902012-12-11 13:09:00 +11001602 for (; size >= 4; size -= 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 newcsum += le32_to_cpu(*isuper++);
1604
1605 if (size == 2)
NeilBrown1c05b4b2006-10-21 10:24:08 -07001606 newcsum += le16_to_cpu(*(__le16*) isuper);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
1608 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1609 sb->sb_csum = disk_csum;
1610 return cpu_to_le32(csum);
1611}
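/*
 * Size arithmetic for the loop above, as a sanity sketch: the region
 * checksummed is 256 bytes of fixed fields plus one __le16 role per
 * device. max_dev == 384 gives 256 + 768 == 1024 bytes, an exact
 * multiple of 4; an odd count such as max_dev == 383 gives 1022 bytes,
 * leaving the 2-byte tail that the le16 case folds in.
 */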
1612
NeilBrown3cb03002011-10-11 16:45:26 +11001613static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614{
1615 struct mdp_superblock_1 *sb;
1616 int ret;
Andre Noll0f420352008-07-11 22:02:23 +10001617 sector_t sb_start;
NeilBrownc6563a82012-05-21 09:27:00 +10001618 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown0002b272005-09-09 16:23:53 -07001620 int bmask;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001621 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
1623 /*
Andre Noll0f420352008-07-11 22:02:23 +10001624 * Calculate the position of the superblock in 512-byte sectors.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 * It is always aligned to a 4K boundary and
 1626 * depending on minor_version, it can be:
1627 * 0: At least 8K, but less than 12K, from end of device
1628 * 1: At start of device
1629 * 2: 4K from start of device.
1630 */
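	/*
	 * Example for minor_version 0 (units are 512-byte sectors): a
	 * 1000005-sector device gives 1000005 - 16 = 999989, masked down
	 * to an 8-sector (4K) boundary, so sb_start = 999984. That is 21
	 * sectors (10.5K) from the end, inside the required 8K..12K window.
	 */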
1631 switch(minor_version) {
1632 case 0:
Christoph Hellwig0fe80342021-10-18 12:11:06 +02001633 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
Andre Noll0f420352008-07-11 22:02:23 +10001634 sb_start &= ~(sector_t)(4*2-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 break;
1636 case 1:
Andre Noll0f420352008-07-11 22:02:23 +10001637 sb_start = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 break;
1639 case 2:
Andre Noll0f420352008-07-11 22:02:23 +10001640 sb_start = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 break;
1642 default:
1643 return -EINVAL;
1644 }
Andre Noll0f420352008-07-11 22:02:23 +10001645 rdev->sb_start = sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
NeilBrown0002b272005-09-09 16:23:53 -07001647 /* superblock is rarely larger than 1K, but it can be larger,
 1648 * and it is safe to read 4K, so we do that
1649 */
1650 ret = read_disk_sb(rdev, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 if (ret) return ret;
1652
Namhyung Kim65a06f062011-07-27 11:00:36 +10001653 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
1655 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1656 sb->major_version != cpu_to_le32(1) ||
1657 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
Andre Noll0f420352008-07-11 22:02:23 +10001658 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
NeilBrown71c08052005-09-09 16:23:51 -07001659 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 return -EINVAL;
1661
1662 if (calc_sb_1_csum(sb) != sb->sb_csum) {
NeilBrown9d487392016-11-02 14:16:49 +11001663 pr_warn("md: invalid superblock checksum on %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 bdevname(rdev->bdev,b));
1665 return -EINVAL;
1666 }
1667 if (le64_to_cpu(sb->data_size) < 10) {
NeilBrown9d487392016-11-02 14:16:49 +11001668 pr_warn("md: data_size too small on %s\n",
1669 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 return -EINVAL;
1671 }
NeilBrownc6563a82012-05-21 09:27:00 +10001672 if (sb->pad0 ||
1673 sb->pad3[0] ||
1674 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1675 /* Some padding is non-zero, might be a new feature */
1676 return -EINVAL;
NeilBrowne11e93f2007-05-09 02:35:36 -07001677
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 rdev->preferred_minor = 0xffff;
1679 rdev->data_offset = le64_to_cpu(sb->data_offset);
NeilBrownc6563a82012-05-21 09:27:00 +10001680 rdev->new_data_offset = rdev->data_offset;
1681 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1682 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1683 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
NeilBrown4dbcdc72006-01-06 00:20:52 -08001684 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
NeilBrown0002b272005-09-09 16:23:53 -07001686 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
Martin K. Petersene1defc42009-05-22 17:17:49 -04001687 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
NeilBrown0002b272005-09-09 16:23:53 -07001688 if (rdev->sb_size & bmask)
NeilBrowna1801f82008-03-04 14:29:31 -08001689 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1690
1691 if (minor_version
Andre Noll0f420352008-07-11 22:02:23 +10001692 && rdev->data_offset < sb_start + (rdev->sb_size/512))
NeilBrowna1801f82008-03-04 14:29:31 -08001693 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10001694 if (minor_version
1695 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1696 return -EINVAL;
NeilBrown0002b272005-09-09 16:23:53 -07001697
NeilBrown31b65a02006-07-10 04:44:14 -07001698 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1699 rdev->desc_nr = -1;
1700 else
1701 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1702
NeilBrown2699b672011-07-28 11:31:47 +10001703 if (!rdev->bb_page) {
1704 rdev->bb_page = alloc_page(GFP_KERNEL);
1705 if (!rdev->bb_page)
1706 return -ENOMEM;
1707 }
1708 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1709 rdev->badblocks.count == 0) {
1710 /* need to load the bad block list.
1711 * Currently we limit it to one page.
1712 */
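		/*
		 * On-disk encoding, as decoded below: each __le64 entry
		 * packs the start sector in bits 63..10 and the length in
		 * bits 9..0, both scaled by bblog_shift. For example
		 * 0x1403 decodes to sector 5, count 3 (with shift 0); an
		 * all-ones entry terminates the list.
		 */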
1713 s32 offset;
1714 sector_t bb_sector;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001715 __le64 *bbp;
NeilBrown2699b672011-07-28 11:31:47 +10001716 int i;
1717 int sectors = le16_to_cpu(sb->bblog_size);
1718 if (sectors > (PAGE_SIZE / 512))
1719 return -EINVAL;
1720 offset = le32_to_cpu(sb->bblog_offset);
1721 if (offset == 0)
1722 return -EINVAL;
1723 bb_sector = (long long)offset;
1724 if (!sync_page_io(rdev, bb_sector, sectors << 9,
Mike Christie796a5cf2016-06-05 14:32:07 -05001725 rdev->bb_page, REQ_OP_READ, 0, true))
NeilBrown2699b672011-07-28 11:31:47 +10001726 return -EIO;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001727 bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10001728 rdev->badblocks.shift = sb->bblog_shift;
1729 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1730 u64 bb = le64_to_cpu(*bbp);
1731 int count = bb & (0x3ff);
1732 u64 sector = bb >> 10;
1733 sector <<= sb->bblog_shift;
1734 count <<= sb->bblog_shift;
1735 if (bb + 1 == 0)
1736 break;
Vishal Vermafc974ee2015-12-24 19:20:34 -07001737 if (badblocks_set(&rdev->badblocks, sector, count, 1))
NeilBrown2699b672011-07-28 11:31:47 +10001738 return -EINVAL;
1739 }
NeilBrown486adf72013-04-24 11:42:44 +10001740 } else if (sb->bblog_offset != 0)
1741 rdev->badblocks.shift = 0;
NeilBrown2699b672011-07-28 11:31:47 +10001742
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001743 if ((le32_to_cpu(sb->feature_map) &
1744 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001745 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1746 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1747 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1748 }
1749
NeilBrown33f2c352019-09-09 16:52:29 +10001750 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1751 sb->level != 0)
1752 return -EINVAL;
1753
Yufen Yu228fc7d2019-10-30 18:47:02 +08001754 /* not spare disk, or LEVEL_MULTIPATH */
1755 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1756 (rdev->desc_nr >= 0 &&
1757 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1758 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1759 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1760 spare_disk = false;
Yufen Yu6a5cb532019-10-16 16:00:03 +08001761
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001762 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001763 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001764 ret = 1;
1765 else
1766 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001767 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001769 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1772 sb->level != refsb->level ||
1773 sb->layout != refsb->layout ||
1774 sb->chunksize != refsb->chunksize) {
NeilBrown9d487392016-11-02 14:16:49 +11001775 pr_warn("md: %s has strangely different superblock to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 bdevname(rdev->bdev,b),
1777 bdevname(refdev->bdev,b2));
1778 return -EINVAL;
1779 }
1780 ev1 = le64_to_cpu(sb->events);
1781 ev2 = le64_to_cpu(refsb->events);
1782
Yufen Yu228fc7d2019-10-30 18:47:02 +08001783 if (!spare_disk && ev1 > ev2)
NeilBrown8ed75462006-02-03 03:03:41 -08001784 ret = 1;
1785 else
1786 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 }
Christoph Hellwig0fe80342021-10-18 12:11:06 +02001788 if (minor_version)
1789 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1790 else
NeilBrownc6563a82012-05-21 09:27:00 +10001791 sectors = rdev->sb_start;
1792 if (sectors < le64_to_cpu(sb->data_size))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 return -EINVAL;
Andre Nolldd8ac332009-03-31 14:33:13 +11001794 rdev->sectors = le64_to_cpu(sb->data_size);
NeilBrown8ed75462006-02-03 03:03:41 -08001795 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796}
1797
NeilBrownfd01b882011-10-11 16:47:53 +11001798static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799{
Namhyung Kim65a06f062011-07-27 11:00:36 +10001800 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001801 __u64 ev1 = le64_to_cpu(sb->events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802
NeilBrown41158c72005-06-21 17:17:25 -07001803 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001804 clear_bit(Faulty, &rdev->flags);
1805 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001806 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001807 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 if (mddev->raid_disks == 0) {
1810 mddev->major_version = 1;
1811 mddev->patch_version = 0;
NeilBrowne6910632008-02-06 01:39:51 -08001812 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001813 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001814 mddev->ctime = le64_to_cpu(sb->ctime);
1815 mddev->utime = le64_to_cpu(sb->utime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 mddev->level = le32_to_cpu(sb->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08001817 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 mddev->layout = le32_to_cpu(sb->layout);
1819 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001820 mddev->dev_sectors = le64_to_cpu(sb->size);
NeilBrown07d84d102006-06-26 00:27:56 -07001821 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001822 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001823 mddev->bitmap_info.space = 0;
 1824 /* Default location for the bitmap is 1K after the superblock,
 1825 * using 3K - a total of 4K
1826 */
NeilBrownc3d97142009-12-14 12:49:52 +11001827 mddev->bitmap_info.default_offset = 1024 >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001828 mddev->bitmap_info.default_space = (4096-1024) >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001829 mddev->reshape_backwards = 0;
1830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1832 memcpy(mddev->uuid, sb->set_uuid, 16);
1833
1834 mddev->max_disks = (4096-256)/2;
NeilBrowna654b9d82005-06-21 17:17:27 -07001835
NeilBrown71c08052005-09-09 16:23:51 -07001836 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001837 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001838 mddev->bitmap_info.offset =
1839 (__s32)le32_to_cpu(sb->bitmap_offset);
NeilBrown6409bb02012-05-22 13:55:07 +10001840 /* Metadata doesn't record how much space is available.
1841 * For 1.0, we assume we can use up to the superblock
1842 * if before, else to 4K beyond superblock.
1843 * For others, assume no change is possible.
1844 */
1845 if (mddev->minor_version > 0)
1846 mddev->bitmap_info.space = 0;
1847 else if (mddev->bitmap_info.offset > 0)
1848 mddev->bitmap_info.space =
1849 8 - mddev->bitmap_info.offset;
1850 else
1851 mddev->bitmap_info.space =
1852 -mddev->bitmap_info.offset;
1853 }
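		/*
		 * e.g. for 1.0 metadata: a negative offset places the
		 * bitmap before the superblock and it may use -offset
		 * sectors up to it; a positive offset lets it grow to 4K
		 * past the superblock, hence 8 - offset sectors.
		 */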
NeilBrowne11e93f2007-05-09 02:35:36 -07001854
NeilBrownf6705572006-03-27 01:18:11 -08001855 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1856 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1857 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1858 mddev->new_level = le32_to_cpu(sb->new_level);
1859 mddev->new_layout = le32_to_cpu(sb->new_layout);
Andre Noll664e7c42009-06-18 08:45:27 +10001860 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
NeilBrown2c810cd2012-05-21 09:27:00 +10001861 if (mddev->delta_disks < 0 ||
1862 (mddev->delta_disks == 0 &&
1863 (le32_to_cpu(sb->feature_map)
1864 & MD_FEATURE_RESHAPE_BACKWARDS)))
1865 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001866 } else {
1867 mddev->reshape_position = MaxSector;
1868 mddev->delta_disks = 0;
1869 mddev->new_level = mddev->level;
1870 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001871 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001872 }
1873
NeilBrown33f2c352019-09-09 16:52:29 +10001874 if (mddev->level == 0 &&
1875 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1876 mddev->layout = -1;
1877
Song Liu486b0f72016-08-19 15:34:01 -07001878 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
Shaohua Lia62ab492016-01-06 14:37:13 -08001879 set_bit(MD_HAS_JOURNAL, &mddev->flags);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001880
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001881 if (le32_to_cpu(sb->feature_map) &
1882 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001883 if (le32_to_cpu(sb->feature_map) &
1884 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1885 return -EINVAL;
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001886 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1887 (le32_to_cpu(sb->feature_map) &
1888 MD_FEATURE_MULTIPLE_PPLS))
1889 return -EINVAL;
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001890 set_bit(MD_HAS_PPL, &mddev->flags);
1891 }
NeilBrown41158c72005-06-21 17:17:25 -07001892 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001893 /* Insist on good event counter while assembling, except for
1894 * spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001896 if (rdev->desc_nr >= 0 &&
1897 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
Song Liua3dfbda2015-10-08 21:54:11 -07001898 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1899 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
NeilBrownbe6800a2010-05-18 10:17:09 +10001900 if (ev1 < mddev->events)
1901 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001902 } else if (mddev->bitmap) {
 1903 /* If adding to an array with a bitmap, then we can accept an
1904 * older device, but not too old.
1905 */
NeilBrown41158c72005-06-21 17:17:25 -07001906 if (ev1 < mddev->bitmap->events_cleared)
1907 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001908 if (ev1 < mddev->events)
1909 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001910 } else {
1911 if (ev1 < mddev->events)
1912 /* just a hot-add of a new device, leave raid_disk at -1 */
1913 return 0;
1914 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 if (mddev->level != LEVEL_MULTIPATH) {
1916 int role;
NeilBrown3673f302009-08-03 10:59:56 +10001917 if (rdev->desc_nr < 0 ||
1918 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
Song Liuc4d4c912015-08-13 14:31:54 -07001919 role = MD_DISK_ROLE_SPARE;
NeilBrown3673f302009-08-03 10:59:56 +10001920 rdev->desc_nr = -1;
1921 } else
1922 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 switch(role) {
Song Liuc4d4c912015-08-13 14:31:54 -07001924 case MD_DISK_ROLE_SPARE: /* spare */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 break;
Song Liuc4d4c912015-08-13 14:31:54 -07001926 case MD_DISK_ROLE_FAULTY: /* faulty */
NeilBrownb2d444d2005-11-08 21:39:31 -08001927 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 break;
Song Liubac624f2015-08-13 14:31:55 -07001929 case MD_DISK_ROLE_JOURNAL: /* journal device */
1930 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1931 /* journal device without journal feature */
NeilBrown9d487392016-11-02 14:16:49 +11001932 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
Song Liubac624f2015-08-13 14:31:55 -07001933 return -EINVAL;
1934 }
1935 set_bit(Journal, &rdev->flags);
Shaohua Li3069aa82015-08-13 14:31:56 -07001936 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
Shaohua Li9b156032015-12-18 15:19:16 +11001937 rdev->raid_disk = 0;
Song Liubac624f2015-08-13 14:31:55 -07001938 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 default:
NeilBrownf4667222013-12-09 12:04:56 +11001940 rdev->saved_raid_disk = role;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001941 if ((le32_to_cpu(sb->feature_map) &
NeilBrownf4667222013-12-09 12:04:56 +11001942 MD_FEATURE_RECOVERY_OFFSET)) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001943 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11001944 if (!(le32_to_cpu(sb->feature_map) &
1945 MD_FEATURE_RECOVERY_BITMAP))
1946 rdev->saved_raid_disk = -1;
Guoqing Jiang062f5b2a2019-07-24 11:09:20 +02001947 } else {
1948 /*
1949 * If the array is FROZEN, then the device can't
 1950 * be in_sync with the rest of the array.
1951 */
1952 if (!test_bit(MD_RECOVERY_FROZEN,
1953 &mddev->recovery))
1954 set_bit(In_sync, &rdev->flags);
1955 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 rdev->raid_disk = role;
1957 break;
1958 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001959 if (sb->devflags & WriteMostly1)
1960 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001961 if (sb->devflags & FailFast1)
1962 set_bit(FailFast, &rdev->flags);
NeilBrown2d78f8c2011-12-23 10:17:51 +11001963 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1964 set_bit(Replacement, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001965 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001966 set_bit(In_sync, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001967
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 return 0;
1969}
1970
NeilBrownfd01b882011-10-11 16:47:53 +11001971static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972{
1973 struct mdp_superblock_1 *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001974 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 int max_dev, i;
1976 /* make rdev->sb match mddev and rdev data. */
1977
Namhyung Kim65a06f062011-07-27 11:00:36 +10001978 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
1980 sb->feature_map = 0;
1981 sb->pad0 = 0;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001982 sb->recovery_offset = cpu_to_le64(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 memset(sb->pad3, 0, sizeof(sb->pad3));
1984
1985 sb->utime = cpu_to_le64((__u64)mddev->utime);
1986 sb->events = cpu_to_le64(mddev->events);
1987 if (mddev->in_sync)
1988 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
Shaohua Libd18f642015-09-02 13:49:50 -07001989 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1990 sb->resync_offset = cpu_to_le64(MaxSector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 else
1992 sb->resync_offset = cpu_to_le64(0);
1993
NeilBrown1c05b4b2006-10-21 10:24:08 -07001994 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
NeilBrown4dbcdc72006-01-06 00:20:52 -08001995
NeilBrownf0ca3402006-02-02 14:28:04 -08001996 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001997 sb->size = cpu_to_le64(mddev->dev_sectors);
Andre Noll9d8f0362009-06-18 08:45:01 +10001998 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
NeilBrown62e1e382009-05-26 09:40:59 +10001999 sb->level = cpu_to_le32(mddev->level);
2000 sb->layout = cpu_to_le32(mddev->layout);
NeilBrown688834e2016-11-18 16:16:11 +11002001 if (test_bit(FailFast, &rdev->flags))
2002 sb->devflags |= FailFast1;
2003 else
2004 sb->devflags &= ~FailFast1;
NeilBrownf0ca3402006-02-02 14:28:04 -08002005
NeilBrownaeb9b2112011-08-25 14:43:08 +10002006 if (test_bit(WriteMostly, &rdev->flags))
2007 sb->devflags |= WriteMostly1;
2008 else
2009 sb->devflags &= ~WriteMostly1;
NeilBrownc6563a82012-05-21 09:27:00 +10002010 sb->data_offset = cpu_to_le64(rdev->data_offset);
2011 sb->data_size = cpu_to_le64(rdev->sectors);
NeilBrownaeb9b2112011-08-25 14:43:08 +10002012
NeilBrownc3d97142009-12-14 12:49:52 +11002013 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2014 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
NeilBrown71c08052005-09-09 16:23:51 -07002015 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
NeilBrowna654b9d82005-06-21 17:17:27 -07002016 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002017
Shaohua Lif2076e72015-10-08 21:54:12 -07002018 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
NeilBrown97e4f422009-03-31 14:33:13 +11002019 !test_bit(In_sync, &rdev->flags)) {
NeilBrown93be75f2009-12-14 12:50:06 +11002020 sb->feature_map |=
2021 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2022 sb->recovery_offset =
2023 cpu_to_le64(rdev->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11002024 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2025 sb->feature_map |=
2026 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002027 }
Shaohua Li3069aa82015-08-13 14:31:56 -07002028 /* Note: recovery_offset and journal_tail share space */
2029 if (test_bit(Journal, &rdev->flags))
2030 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002031 if (test_bit(Replacement, &rdev->flags))
2032 sb->feature_map |=
2033 cpu_to_le32(MD_FEATURE_REPLACEMENT);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002034
NeilBrownf6705572006-03-27 01:18:11 -08002035 if (mddev->reshape_position != MaxSector) {
2036 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2037 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2038 sb->new_layout = cpu_to_le32(mddev->new_layout);
2039 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2040 sb->new_level = cpu_to_le32(mddev->new_level);
Andre Noll664e7c42009-06-18 08:45:27 +10002041 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
NeilBrown2c810cd2012-05-21 09:27:00 +10002042 if (mddev->delta_disks == 0 &&
2043 mddev->reshape_backwards)
2044 sb->feature_map
2045 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
NeilBrownc6563a82012-05-21 09:27:00 +10002046 if (rdev->new_data_offset != rdev->data_offset) {
2047 sb->feature_map
2048 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2049 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2050 - rdev->data_offset));
2051 }
NeilBrownf6705572006-03-27 01:18:11 -08002052 }
NeilBrowna654b9d82005-06-21 17:17:27 -07002053
Goldwyn Rodrigues3c462c82015-08-19 07:35:54 +10002054 if (mddev_is_clustered(mddev))
2055 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2056
NeilBrown2699b672011-07-28 11:31:47 +10002057 if (rdev->badblocks.count == 0)
 2058 /* Nothing to do for bad blocks */ ;
2059 else if (sb->bblog_offset == 0)
2060 /* Cannot record bad blocks on this device */
2061 md_error(mddev, rdev);
2062 else {
2063 struct badblocks *bb = &rdev->badblocks;
Christoph Hellwigae506402019-04-04 18:56:13 +02002064 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10002065 u64 *p = bb->page;
2066 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2067 if (bb->changed) {
2068 unsigned seq;
2069
2070retry:
2071 seq = read_seqbegin(&bb->lock);
2072
2073 memset(bbp, 0xff, PAGE_SIZE);
2074
2075 for (i = 0 ; i < bb->count ; i++) {
majianpeng35f9ac22012-11-08 08:56:27 +08002076 u64 internal_bb = p[i];
NeilBrown2699b672011-07-28 11:31:47 +10002077 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2078 | BB_LEN(internal_bb));
majianpeng35f9ac22012-11-08 08:56:27 +08002079 bbp[i] = cpu_to_le64(store_bb);
NeilBrown2699b672011-07-28 11:31:47 +10002080 }
NeilBrownd0962932012-03-19 12:46:41 +11002081 bb->changed = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002082 if (read_seqretry(&bb->lock, seq))
2083 goto retry;
2084
2085 bb->sector = (rdev->sb_start +
2086 (int)le32_to_cpu(sb->bblog_offset));
2087 bb->size = le16_to_cpu(sb->bblog_size);
NeilBrown2699b672011-07-28 11:31:47 +10002088 }
2089 }
2090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 max_dev = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11002092 rdev_for_each(rdev2, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 if (rdev2->desc_nr+1 > max_dev)
2094 max_dev = rdev2->desc_nr+1;
NeilBrowna778b732007-05-23 13:58:10 -07002095
NeilBrown70471da2009-08-03 10:59:57 +10002096 if (max_dev > le32_to_cpu(sb->max_dev)) {
2097 int bmask;
NeilBrowna778b732007-05-23 13:58:10 -07002098 sb->max_dev = cpu_to_le32(max_dev);
NeilBrown70471da2009-08-03 10:59:57 +10002099 rdev->sb_size = max_dev * 2 + 256;
2100 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2101 if (rdev->sb_size & bmask)
2102 rdev->sb_size = (rdev->sb_size | bmask) + 1;
NeilBrownddcf3522010-09-08 16:48:17 +10002103 } else
2104 max_dev = le32_to_cpu(sb->max_dev);
2105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 for (i=0; i<max_dev;i++)
Lidong Zhong8df72022017-06-12 10:45:55 +08002107 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
NeilBrownf72ffdd2014-09-30 14:23:59 +10002108
Song Liua97b7892015-10-08 21:54:09 -07002109 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2110 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002112 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
Pawel Baldysiakddc08822017-08-16 17:13:45 +02002113 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2114 sb->feature_map |=
2115 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2116 else
2117 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002118 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2119 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2120 }
2121
NeilBrowndafb20f2012-03-19 12:46:39 +11002122 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 i = rdev2->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08002124 if (test_bit(Faulty, &rdev2->flags))
Song Liuc4d4c912015-08-13 14:31:54 -07002125 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08002126 else if (test_bit(In_sync, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Song Liua97b7892015-10-08 21:54:09 -07002128 else if (test_bit(Journal, &rdev2->flags))
Song Liubac624f2015-08-13 14:31:55 -07002129 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
NeilBrown93be75f2009-12-14 12:50:06 +11002130 else if (rdev2->raid_disk >= 0)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002131 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 else
Song Liuc4d4c912015-08-13 14:31:54 -07002133 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 }
2135
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 sb->sb_csum = calc_sb_1_csum(sb);
2137}
2138
Xiao Nid9c0fa52020-06-30 15:55:36 +08002139static sector_t super_1_choose_bm_space(sector_t dev_size)
2140{
2141 sector_t bm_space;
2142
 2143 /* if the device is bigger than 8GiB, save 64K for bitmap
 2144 * usage; if bigger than 200GiB, save 128K
2145 */
2146 if (dev_size < 64*2)
2147 bm_space = 0;
2148 else if (dev_size - 64*2 >= 200*1024*1024*2)
2149 bm_space = 128*2;
2150 else if (dev_size - 4*2 > 8*1024*1024*2)
2151 bm_space = 64*2;
2152 else
2153 bm_space = 4*2;
2154 return bm_space;
2155}
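/*
 * Examples (units are 512-byte sectors): a 16TiB component takes the
 * 200GiB+ branch and reserves 128K; a 100GiB component takes the 8GiB+
 * branch and reserves 64K; smaller devices keep the historical 4K.
 */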
2156
Chris Webb0cd17fe2008-06-28 08:31:46 +10002157static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11002158super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002159{
2160 struct mdp_superblock_1 *sb;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002161 sector_t max_sectors;
Andre Noll58c0fed2009-03-31 14:33:13 +11002162 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002163 return 0; /* component must fit device */
NeilBrownc6563a82012-05-21 09:27:00 +10002164 if (rdev->data_offset != rdev->new_data_offset)
2165 return 0; /* too confusing */
Andre Noll0f420352008-07-11 22:02:23 +10002166 if (rdev->sb_start < rdev->data_offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002167 /* minor versions 1 and 2; superblock before data */
Christoph Hellwig0fe80342021-10-18 12:11:06 +02002168 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002169 if (!num_sectors || num_sectors > max_sectors)
2170 num_sectors = max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11002171 } else if (rdev->mddev->bitmap_info.offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002172 /* minor version 0 with bitmap we can't move */
2173 return 0;
2174 } else {
2175 /* minor version 0; superblock after data */
Xiao Nid9c0fa52020-06-30 15:55:36 +08002176 sector_t sb_start, bm_space;
Christoph Hellwig0fe80342021-10-18 12:11:06 +02002177 sector_t dev_size = bdev_nr_sectors(rdev->bdev);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002178
2179 /* 8K is for superblock */
2180 sb_start = dev_size - 8*2;
Andre Noll0f420352008-07-11 22:02:23 +10002181 sb_start &= ~(sector_t)(4*2 - 1);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002182
2183 bm_space = super_1_choose_bm_space(dev_size);
2184
 2185 /* Space that can be used to store data must exclude the
 2186 * superblock, the bitmap space and the bad block space (4K)
2187 */
2188 max_sectors = sb_start - bm_space - 4*2;
2189
Andre Noll15f4a5f2008-07-21 14:42:12 +10002190 if (!num_sectors || num_sectors > max_sectors)
2191 num_sectors = max_sectors;
Markus Hochholdinger55df1ce2021-11-16 10:21:35 +00002192 rdev->sb_start = sb_start;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002193 }
Namhyung Kim65a06f062011-07-27 11:00:36 +10002194 sb = page_address(rdev->sb_page);
Andre Noll15f4a5f2008-07-21 14:42:12 +10002195 sb->data_size = cpu_to_le64(num_sectors);
Jason Yan3fb632e2017-03-10 11:27:23 +08002196 sb->super_offset = cpu_to_le64(rdev->sb_start);
Chris Webb0cd17fe2008-06-28 08:31:46 +10002197 sb->sb_csum = calc_sb_1_csum(sb);
NeilBrown46533ff2016-11-18 16:16:11 +11002198 do {
2199 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2200 rdev->sb_page);
2201 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11002202 return num_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10002203
2204}
2205
2206static int
2207super_1_allow_new_offset(struct md_rdev *rdev,
2208 unsigned long long new_offset)
2209{
2210 /* All necessary checks on new >= old have been done */
2211 struct bitmap *bitmap;
2212 if (new_offset >= rdev->data_offset)
2213 return 1;
2214
2215 /* with 1.0 metadata, there is no metadata to tread on
2216 * so we can always move back */
2217 if (rdev->mddev->minor_version == 0)
2218 return 1;
2219
2220 /* otherwise we must be sure not to step on
2221 * any metadata, so stay:
2222 * 36K beyond start of superblock
2223 * beyond end of badblocks
2224 * beyond write-intent bitmap
2225 */
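	/* ((32 + 4) * 2 == 72 sectors == 36K) */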
2226 if (rdev->sb_start + (32+4)*2 > new_offset)
2227 return 0;
2228 bitmap = rdev->mddev->bitmap;
2229 if (bitmap && !rdev->mddev->bitmap_info.file &&
2230 rdev->sb_start + rdev->mddev->bitmap_info.offset +
NeilBrown1ec885c2012-05-22 13:55:10 +10002231 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
NeilBrownc6563a82012-05-21 09:27:00 +10002232 return 0;
2233 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2234 return 0;
2235
2236 return 1;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002237}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
Adrian Bunk75c96f82005-05-05 16:16:09 -07002239static struct super_type super_types[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 [0] = {
2241 .name = "0.90.0",
2242 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002243 .load_super = super_90_load,
2244 .validate_super = super_90_validate,
2245 .sync_super = super_90_sync,
2246 .rdev_size_change = super_90_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002247 .allow_new_offset = super_90_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 },
2249 [1] = {
2250 .name = "md-1",
2251 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002252 .load_super = super_1_load,
2253 .validate_super = super_1_validate,
2254 .sync_super = super_1_sync,
2255 .rdev_size_change = super_1_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002256 .allow_new_offset = super_1_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 },
2258};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259
NeilBrownfd01b882011-10-11 16:47:53 +11002260static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
Jonathan Brassow076f9682011-06-07 17:51:30 -05002261{
2262 if (mddev->sync_super) {
2263 mddev->sync_super(mddev, rdev);
2264 return;
2265 }
2266
2267 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2268
2269 super_types[mddev->major_version].sync_super(mddev, rdev);
2270}
2271
NeilBrownfd01b882011-10-11 16:47:53 +11002272static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273{
NeilBrown3cb03002011-10-11 16:45:26 +11002274 struct md_rdev *rdev, *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
NeilBrown4b809912008-07-21 17:05:25 +10002276 rcu_read_lock();
Song Liu0b020e82015-09-03 23:00:35 -07002277 rdev_for_each_rcu(rdev, mddev1) {
2278 if (test_bit(Faulty, &rdev->flags) ||
2279 test_bit(Journal, &rdev->flags) ||
2280 rdev->raid_disk == -1)
2281 continue;
2282 rdev_for_each_rcu(rdev2, mddev2) {
2283 if (test_bit(Faulty, &rdev2->flags) ||
2284 test_bit(Journal, &rdev2->flags) ||
2285 rdev2->raid_disk == -1)
2286 continue;
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02002287 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
NeilBrown4b809912008-07-21 17:05:25 +10002288 rcu_read_unlock();
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002289 return 1;
NeilBrown4b809912008-07-21 17:05:25 +10002290 }
Song Liu0b020e82015-09-03 23:00:35 -07002291 }
2292 }
NeilBrown4b809912008-07-21 17:05:25 +10002293 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 return 0;
2295}
2296
2297static LIST_HEAD(pending_raid_disks);
2298
Andre Nollac5e7112009-08-03 10:59:47 +10002299/*
2300 * Try to register data integrity profile for an mddev
2301 *
2302 * This is called when an array is started and after a disk has been kicked
2303 * from the array. It only succeeds if all working and active component devices
2304 * are integrity capable with matching profiles.
2305 */
NeilBrownfd01b882011-10-11 16:47:53 +11002306int md_integrity_register(struct mddev *mddev)
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002307{
NeilBrown3cb03002011-10-11 16:45:26 +11002308 struct md_rdev *rdev, *reference = NULL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002309
Andre Nollac5e7112009-08-03 10:59:47 +10002310 if (list_empty(&mddev->disks))
2311 return 0; /* nothing to do */
Jonathan Brassow629acb62011-06-08 15:10:08 +10002312 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2313 return 0; /* shouldn't register, or already is */
NeilBrowndafb20f2012-03-19 12:46:39 +11002314 rdev_for_each(rdev, mddev) {
Andre Nollac5e7112009-08-03 10:59:47 +10002315 /* skip spares and non-functional disks */
2316 if (test_bit(Faulty, &rdev->flags))
2317 continue;
2318 if (rdev->raid_disk < 0)
2319 continue;
Andre Nollac5e7112009-08-03 10:59:47 +10002320 if (!reference) {
2321 /* Use the first rdev as the reference */
2322 reference = rdev;
2323 continue;
2324 }
2325 /* does this rdev's profile match the reference profile? */
2326 if (blk_integrity_compare(reference->bdev->bd_disk,
2327 rdev->bdev->bd_disk) < 0)
2328 return -EINVAL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002329 }
Martin K. Petersen89078d52011-03-28 20:09:12 -04002330 if (!reference || !bdev_get_integrity(reference->bdev))
2331 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002332 /*
2333 * All component devices are integrity capable and have matching
2334 * profiles, register the common profile for the md device.
2335 */
Martin K. Petersen25520d52015-10-21 13:19:49 -04002336 blk_integrity_register(mddev->gendisk,
2337 bdev_get_integrity(reference->bdev));
2338
NeilBrown9d487392016-11-02 14:16:49 +11002339 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
Guoqing Jiang10764812021-05-25 17:46:17 +08002340 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
Guoqing Jiangdaee2022021-06-03 17:21:06 +08002341 (mddev->level != 1 && mddev->level != 10 &&
2342 bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) {
Guoqing Jiangde3ea662021-06-03 17:21:07 +08002343 /*
2344 * No need to handle the failure of bioset_integrity_create,
2345	 * because the function is called by md_run() -> pers->run(),
2346	 * and md_run() calls bioset_exit() -> bioset_integrity_free()
2347	 * in the failure case.
2348 */
NeilBrown9d487392016-11-02 14:16:49 +11002349 pr_err("md: failed to create integrity pool for %s\n",
Martin K. Petersena91a2782011-03-17 11:11:05 +01002350 mdname(mddev));
2351 return -EINVAL;
2352 }
Andre Nollac5e7112009-08-03 10:59:47 +10002353 return 0;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002354}
Andre Nollac5e7112009-08-03 10:59:47 +10002355EXPORT_SYMBOL(md_integrity_register);
2356
Dan Williams1501efa2016-01-13 16:00:07 -08002357/*
2358 * Attempt to add an rdev, but only if it is consistent with the current
2359 * integrity profile
2360 */
2361int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
Andre Nollac5e7112009-08-03 10:59:47 +10002362{
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002363 struct blk_integrity *bi_mddev;
Dan Williams1501efa2016-01-13 16:00:07 -08002364 char name[BDEVNAME_SIZE];
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002365
2366 if (!mddev->gendisk)
Dan Williams1501efa2016-01-13 16:00:07 -08002367 return 0;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002368
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002369 bi_mddev = blk_get_integrity(mddev->gendisk);
Andre Nollac5e7112009-08-03 10:59:47 +10002370
2371 if (!bi_mddev) /* nothing to do */
Dan Williams1501efa2016-01-13 16:00:07 -08002372 return 0;
2373
2374 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11002375 pr_err("%s: incompatible integrity profile for %s\n",
2376 mdname(mddev), bdevname(rdev->bdev, name));
Dan Williams1501efa2016-01-13 16:00:07 -08002377 return -ENXIO;
2378 }
2379
2380 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002381}
2382EXPORT_SYMBOL(md_integrity_add_rdev);
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002383
Christoph Hellwigd7a47832021-02-01 14:17:20 +01002384static bool rdev_read_only(struct md_rdev *rdev)
2385{
2386 return bdev_read_only(rdev->bdev) ||
2387 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2388}
2389
NeilBrownf72ffdd2014-09-30 14:23:59 +10002390static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391{
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002392 char b[BDEVNAME_SIZE];
NeilBrown5e55e2f2007-03-26 21:32:14 -08002393 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394
Dan Williams11e2ede2008-04-30 00:52:32 -07002395 /* prevent duplicates */
2396 if (find_rdev(mddev, rdev->bdev->bd_dev))
2397 return -EEXIST;
2398
Christoph Hellwigd7a47832021-02-01 14:17:20 +01002399 if (rdev_read_only(rdev) && mddev->pers)
NeilBrown97b20ef2017-04-13 08:53:48 +10002400 return -EROFS;
2401
Andre Nolldd8ac332009-03-31 14:33:13 +11002402 /* make sure rdev->sectors exceeds mddev->dev_sectors */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002403 if (!test_bit(Journal, &rdev->flags) &&
2404 rdev->sectors &&
2405 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
NeilBrowna778b732007-05-23 13:58:10 -07002406 if (mddev->pers) {
2407 /* Cannot change size, so fail
2408 * If mddev->level <= 0, then we don't care
2409 * about aligning sizes (e.g. linear)
2410 */
2411 if (mddev->level > 0)
2412 return -ENOSPC;
2413 } else
Andre Nolldd8ac332009-03-31 14:33:13 +11002414 mddev->dev_sectors = rdev->sectors;
NeilBrown2bf071b2006-01-06 00:20:55 -08002415 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416
2417 /* Verify rdev->desc_nr is unique.
2418 * If it is -1, assign a free number, else
2419 * check number is not in use
2420 */
NeilBrown4878e9e2014-09-25 17:00:11 +10002421 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 if (rdev->desc_nr < 0) {
2423 int choice = 0;
NeilBrown4878e9e2014-09-25 17:00:11 +10002424 if (mddev->pers)
2425 choice = mddev->raid_disks;
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002426 while (md_find_rdev_nr_rcu(mddev, choice))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 choice++;
2428 rdev->desc_nr = choice;
2429 } else {
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002430 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
NeilBrown4878e9e2014-09-25 17:00:11 +10002431 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 return -EBUSY;
NeilBrown4878e9e2014-09-25 17:00:11 +10002433 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 }
NeilBrown4878e9e2014-09-25 17:00:11 +10002435 rcu_read_unlock();
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002436 if (!test_bit(Journal, &rdev->flags) &&
2437 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11002438 pr_warn("md: %s: array is limited to %d devices\n",
2439 mdname(mddev), mddev->max_disks);
NeilBrownde01dfa2009-02-06 18:02:46 +11002440 return -EBUSY;
2441 }
NeilBrown19133a42005-11-08 21:39:35 -08002442 bdevname(rdev->bdev,b);
Rasmus Villemoes90a9bef2015-06-25 15:02:36 -07002443 strreplace(b, '/', '!');
Greg Kroah-Hartman649316b2007-12-17 23:05:35 -07002444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 rdev->mddev = mddev;
NeilBrown9d487392016-11-02 14:16:49 +11002446 pr_debug("md: bind<%s>\n", b);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002447
Guoqing Jiang963c5552019-06-14 17:10:36 +08002448 if (mddev->raid_disks)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002449 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang963c5552019-06-14 17:10:36 +08002450
Greg Kroah-Hartmanb2d6db52007-12-17 23:05:35 -07002451 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
NeilBrown5e55e2f2007-03-26 21:32:14 -08002452 goto fail;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002453
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09002454 /* failure here is OK */
Christoph Hellwig8d652692020-11-17 08:18:55 +01002455 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
NeilBrown00bcb4a2010-06-01 19:37:23 +10002456 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07002457 rdev->sysfs_unack_badblocks =
2458 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2459 rdev->sysfs_badblocks =
2460 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
NeilBrown3c0ee632008-10-21 13:25:28 +11002461
NeilBrown4b809912008-07-21 17:05:25 +10002462 list_add_rcu(&rdev->same_set, &mddev->disks);
Tejun Heoe09b4572010-11-13 11:55:17 +01002463 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
NeilBrown4044ba52009-01-09 08:31:11 +11002464
2465 /* May as well allow recovery to be retried once */
NeilBrown53890422011-07-27 11:00:36 +10002466 mddev->recovery_disabled++;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002467
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 return 0;
NeilBrown5e55e2f2007-03-26 21:32:14 -08002469
2470 fail:
NeilBrown9d487392016-11-02 14:16:49 +11002471 pr_warn("md: failed to register dev-%s for %s\n",
2472 b, mdname(mddev));
NeilBrown5e55e2f2007-03-26 21:32:14 -08002473 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474}
2475
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002476static void rdev_delayed_delete(struct work_struct *ws)
NeilBrown5792a282007-04-04 19:08:18 -07002477{
NeilBrown3cb03002011-10-11 16:45:26 +11002478 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
NeilBrown5792a282007-04-04 19:08:18 -07002479 kobject_del(&rdev->kobj);
NeilBrown177a99b2008-02-06 01:39:56 -08002480 kobject_put(&rdev->kobj);
NeilBrown5792a282007-04-04 19:08:18 -07002481}
2482
NeilBrownf72ffdd2014-09-30 14:23:59 +10002483static void unbind_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002484{
2485 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002486
Tejun Heo49731ba2011-01-14 18:43:57 +01002487 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
NeilBrown4b809912008-07-21 17:05:25 +10002488 list_del_rcu(&rdev->same_set);
NeilBrown9d487392016-11-02 14:16:49 +11002489 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01002490 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 rdev->mddev = NULL;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002492 sysfs_remove_link(&rdev->kobj, "block");
NeilBrown3c0ee632008-10-21 13:25:28 +11002493 sysfs_put(rdev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07002494 sysfs_put(rdev->sysfs_unack_badblocks);
2495 sysfs_put(rdev->sysfs_badblocks);
NeilBrown3c0ee632008-10-21 13:25:28 +11002496 rdev->sysfs_state = NULL;
Junxiao Bie1a86db2020-07-14 16:10:26 -07002497 rdev->sysfs_unack_badblocks = NULL;
2498 rdev->sysfs_badblocks = NULL;
NeilBrown2230dfe2011-07-28 11:31:46 +10002499 rdev->badblocks.count = 0;
NeilBrown5792a282007-04-04 19:08:18 -07002500 /* We need to delay this, otherwise we can deadlock when
NeilBrown4b809912008-07-21 17:05:25 +10002501	 * writing 'remove' to "dev/state". We also need
2502 * to delay it due to rcu usage.
NeilBrown5792a282007-04-04 19:08:18 -07002503 */
NeilBrown4b809912008-07-21 17:05:25 +10002504 synchronize_rcu();
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002505 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
NeilBrown177a99b2008-02-06 01:39:56 -08002506 kobject_get(&rdev->kobj);
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002507 queue_work(md_rdev_misc_wq, &rdev->del_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508}
2509
2510/*
2511 * prevent the device from being mounted, repartitioned or
2512 * otherwise reused by a RAID array (or any other kernel
2513 * subsystem), by bd_claiming the device.
2514 */
NeilBrown3cb03002011-10-11 16:45:26 +11002515static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516{
2517 int err = 0;
2518 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
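	/*
	 * The holder cookie passed below is only used for exclusive-claim
	 * matching: a shared claim passes the address of lock_rdev()
	 * itself so that all md shared claims present the same holder,
	 * while a private claim uses the rdev.
	 */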
Tejun Heod4d77622010-11-13 11:55:18 +01002520 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
NeilBrown3cb03002011-10-11 16:45:26 +11002521 shared ? (struct md_rdev *)lock_rdev : rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 if (IS_ERR(bdev)) {
Christoph Hellwigea3edd42020-03-24 08:25:11 +01002523 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2524 MAJOR(dev), MINOR(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 return PTR_ERR(bdev);
2526 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 rdev->bdev = bdev;
2528 return err;
2529}
2530
NeilBrown3cb03002011-10-11 16:45:26 +11002531static void unlock_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532{
2533 struct block_device *bdev = rdev->bdev;
2534 rdev->bdev = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01002535 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536}
2537
2538void md_autodetect_dev(dev_t dev);
2539
NeilBrownf72ffdd2014-09-30 14:23:59 +10002540static void export_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541{
2542 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002543
NeilBrown9d487392016-11-02 14:16:49 +11002544 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
NeilBrown545c8792012-05-22 13:54:30 +10002545 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546#ifndef MODULE
NeilBrownd0fae182008-03-04 14:29:31 -08002547 if (test_bit(AutoDetected, &rdev->flags))
2548 md_autodetect_dev(rdev->bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549#endif
2550 unlock_rdev(rdev);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002551 kobject_put(&rdev->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552}
2553
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002554void md_kick_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555{
2556 unbind_rdev_from_array(rdev);
2557 export_rdev(rdev);
2558}
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002559EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
NeilBrownfd01b882011-10-11 16:47:53 +11002561static void export_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002562{
NeilBrown0638bb02014-09-25 17:43:47 +10002563 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564
NeilBrown0638bb02014-09-25 17:43:47 +10002565 while (!list_empty(&mddev->disks)) {
2566 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2567 same_set);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002568 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 mddev->raid_disks = 0;
2571 mddev->major_version = 0;
2572}
2573
NeilBrown6497709b2017-03-15 14:05:14 +11002574static bool set_in_sync(struct mddev *mddev)
2575{
Shaohua Liefa4b772017-10-18 22:08:13 -07002576 lockdep_assert_held(&mddev->lock);
NeilBrown4ad23a972017-03-15 14:05:14 +11002577 if (!mddev->in_sync) {
2578 mddev->sync_checkers++;
2579 spin_unlock(&mddev->lock);
2580 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2581 spin_lock(&mddev->lock);
2582 if (!mddev->in_sync &&
2583 percpu_ref_is_zero(&mddev->writes_pending)) {
NeilBrown6497709b2017-03-15 14:05:14 +11002584 mddev->in_sync = 1;
NeilBrown4ad23a972017-03-15 14:05:14 +11002585 /*
2586 * Ensure ->in_sync is visible before we clear
2587 * ->sync_checkers.
2588 */
NeilBrown55cc39f2017-03-15 14:05:14 +11002589 smp_mb();
NeilBrown6497709b2017-03-15 14:05:14 +11002590 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2591 sysfs_notify_dirent_safe(mddev->sysfs_state);
2592 }
NeilBrown4ad23a972017-03-15 14:05:14 +11002593 if (--mddev->sync_checkers == 0)
2594 percpu_ref_switch_to_percpu(&mddev->writes_pending);
NeilBrown6497709b2017-03-15 14:05:14 +11002595 }
2596 if (mddev->safemode == 1)
2597 mddev->safemode = 0;
2598 return mddev->in_sync;
2599}
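/*
 * Hedged usage sketch, not a call site in this file: set_in_sync()
 * must be entered with mddev->lock held (see the lockdep assertion
 * above) and may drop and re-take that lock internally.
 */
static bool __maybe_unused set_in_sync_sketch(struct mddev *mddev)
{
	bool in_sync;

	spin_lock(&mddev->lock);
	in_sync = set_in_sync(mddev);
	spin_unlock(&mddev->lock);
	return in_sync;
}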
2600
NeilBrownf72ffdd2014-09-30 14:23:59 +10002601static void sync_sbs(struct mddev *mddev, int nospares)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602{
NeilBrown42543762006-06-26 00:27:57 -07002603 /* Update each superblock (in-memory image), but
2604 * if we are allowed to, skip spares which already
2605 * have the right event counter, or have one earlier
2606 * (which would mean they aren't being marked as dirty
2607 * with the rest of the array)
2608 */
NeilBrown3cb03002011-10-11 16:45:26 +11002609 struct md_rdev *rdev;
NeilBrowndafb20f2012-03-19 12:46:39 +11002610 rdev_for_each(rdev, mddev) {
NeilBrown42543762006-06-26 00:27:57 -07002611 if (rdev->sb_events == mddev->events ||
2612 (nospares &&
2613 rdev->raid_disk < 0 &&
NeilBrown42543762006-06-26 00:27:57 -07002614 rdev->sb_events+1 == mddev->events)) {
2615 /* Don't update this superblock */
2616 rdev->sb_loaded = 2;
2617 } else {
Jonathan Brassow076f9682011-06-07 17:51:30 -05002618 sync_super(mddev, rdev);
NeilBrown42543762006-06-26 00:27:57 -07002619 rdev->sb_loaded = 1;
2620 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 }
2622}
2623
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002624static bool does_sb_need_changing(struct mddev *mddev)
2625{
2626 struct md_rdev *rdev;
2627 struct mdp_superblock_1 *sb;
2628 int role;
2629
2630 /* Find a good rdev */
2631 rdev_for_each(rdev, mddev)
2632 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2633 break;
2634
2635 /* No good device found. */
2636 if (!rdev)
2637 return false;
2638
2639 sb = page_address(rdev->sb_page);
2640 /* Check if a device has become faulty or a spare become active */
2641 rdev_for_each(rdev, mddev) {
2642 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2643 /* Device activated? */
2644		if (role == 0xffff && rdev->raid_disk >= 0 &&
2645 !test_bit(Faulty, &rdev->flags))
2646 return true;
2647 /* Device turned faulty? */
2648 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2649 return true;
2650 }
2651
2652 /* Check if any mddev parameters have changed */
2653 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2654 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
Jason Yan13459212017-03-10 11:49:12 +08002655 (mddev->layout != le32_to_cpu(sb->layout)) ||
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002656 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2657 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2658 return true;
2659
2660 return false;
2661}
2662
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002663void md_update_sb(struct mddev *mddev, int force_change)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664{
NeilBrown3cb03002011-10-11 16:45:26 +11002665 struct md_rdev *rdev;
NeilBrown06d91a52005-06-21 17:17:12 -07002666 int sync_req;
NeilBrown42543762006-06-26 00:27:57 -07002667 int nospares = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002668 int any_badblocks_changed = 0;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002669 int ret = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670
NeilBrownd87f0642013-04-24 11:42:40 +10002671 if (mddev->ro) {
2672 if (force_change)
Shaohua Li29530792016-12-08 15:48:19 -08002673 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownd87f0642013-04-24 11:42:40 +10002674 return;
2675 }
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002676
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002677repeat:
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002678 if (mddev_is_clustered(mddev)) {
Shaohua Li29530792016-12-08 15:48:19 -08002679 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002680 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002681 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04002682 nospares = 1;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002683 ret = md_cluster_ops->metadata_update_start(mddev);
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002684		/* Has someone else updated the sb? */
2685 if (!does_sb_need_changing(mddev)) {
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002686 if (ret == 0)
2687 md_cluster_ops->metadata_update_cancel(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08002688 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2689 BIT(MD_SB_CHANGE_DEVS) |
2690 BIT(MD_SB_CHANGE_CLEAN));
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002691 return;
2692 }
2693 }
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002694
NeilBrowndb0505d2017-10-17 16:18:36 +11002695 /*
2696	 * First make sure individual recovery_offsets are correct;
2697	 * curr_resync_completed can only be used during recovery.
2698	 * During reshape/resync it might use array addresses rather
2699	 * than device addresses.
2700 */
NeilBrowndafb20f2012-03-19 12:46:39 +11002701 rdev_for_each(rdev, mddev) {
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002702 if (rdev->raid_disk >= 0 &&
2703 mddev->delta_disks >= 0 &&
NeilBrowndb0505d2017-10-17 16:18:36 +11002704 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2705 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2706 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002707 !test_bit(Journal, &rdev->flags) &&
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002708 !test_bit(In_sync, &rdev->flags) &&
2709 mddev->curr_resync_completed > rdev->recovery_offset)
2710 rdev->recovery_offset = mddev->curr_resync_completed;
2711
NeilBrownf72ffdd2014-09-30 14:23:59 +10002712 }
Dan Williamsbd52b742010-08-30 17:33:33 +10002713 if (!mddev->persistent) {
Shaohua Li29530792016-12-08 15:48:19 -08002714 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2715 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownde393cd2011-07-28 11:31:48 +10002716 if (!mddev->external) {
Shaohua Li29530792016-12-08 15:48:19 -08002717 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrowndafb20f2012-03-19 12:46:39 +11002718 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002719 if (rdev->badblocks.changed) {
NeilBrownd0962932012-03-19 12:46:41 +11002720 rdev->badblocks.changed = 0;
Vishal Vermafc974ee2015-12-24 19:20:34 -07002721 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002722 md_error(mddev, rdev);
2723 }
2724 clear_bit(Blocked, &rdev->flags);
2725 clear_bit(BlockedBadBlocks, &rdev->flags);
2726 wake_up(&rdev->blocked_wait);
2727 }
2728 }
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002729 wake_up(&mddev->sb_wait);
2730 return;
2731 }
2732
NeilBrown85572d72014-12-15 12:56:56 +11002733 spin_lock(&mddev->lock);
NeilBrown84692192006-08-27 01:23:49 -07002734
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11002735 mddev->utime = ktime_get_real_seconds();
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002736
Shaohua Li29530792016-12-08 15:48:19 -08002737 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002738 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002739 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002740		/* just a clean<->dirty transition; possibly leave spares alone,
2741		 * though if the event count doesn't have the right even/odd
2742		 * parity, we will have to update spares after all
2743 */
2744 nospares = 1;
2745 if (force_change)
2746 nospares = 0;
2747 if (mddev->degraded)
NeilBrown84692192006-08-27 01:23:49 -07002748 /* If the array is degraded, then skipping spares is both
2749 * dangerous and fairly pointless.
2750 * Dangerous because a device that was removed from the array
2751		 * might have an event_count that still looks up-to-date,
2752 * so it can be re-added without a resync.
2753 * Pointless because if there are any spares to skip,
2754 * then a recovery will happen and soon that array won't
2755 * be degraded any more and the spare can go back to sleep then.
2756 */
NeilBrown850b2b422006-10-03 01:15:46 -07002757 nospares = 0;
NeilBrown84692192006-08-27 01:23:49 -07002758
NeilBrown06d91a52005-06-21 17:17:12 -07002759 sync_req = mddev->in_sync;
NeilBrown42543762006-06-26 00:27:57 -07002760
2761 /* If this is just a dirty<->clean transition, and the array is clean
2762 * and 'events' is odd, we can roll back to the previous clean state */
NeilBrown850b2b422006-10-03 01:15:46 -07002763 if (nospares
NeilBrown42543762006-06-26 00:27:57 -07002764 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
NeilBrowna8707c02010-05-18 09:28:43 +10002765 && mddev->can_decrease_events
2766 && mddev->events != 1) {
NeilBrown42543762006-06-26 00:27:57 -07002767 mddev->events--;
NeilBrowna8707c02010-05-18 09:28:43 +10002768 mddev->can_decrease_events = 0;
2769 } else {
NeilBrown42543762006-06-26 00:27:57 -07002770 /* otherwise we have to go forward and ... */
2771 mddev->events ++;
NeilBrowna8707c02010-05-18 09:28:43 +10002772 mddev->can_decrease_events = nospares;
NeilBrown42543762006-06-26 00:27:57 -07002773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774
NeilBrown403df472014-09-30 15:52:29 +10002775 /*
2776 * This 64-bit counter should never wrap.
2777	 * Either we are somewhere around the year 1 trillion A.D., assuming
2778 * 1 reboot per second, or we have a bug...
2779 */
2780 WARN_ON(mddev->events == 0);
NeilBrown2699b672011-07-28 11:31:47 +10002781
NeilBrowndafb20f2012-03-19 12:46:39 +11002782 rdev_for_each(rdev, mddev) {
NeilBrown2699b672011-07-28 11:31:47 +10002783 if (rdev->badblocks.changed)
2784 any_badblocks_changed++;
NeilBrownde393cd2011-07-28 11:31:48 +10002785 if (test_bit(Faulty, &rdev->flags))
2786 set_bit(FaultRecorded, &rdev->flags);
2787 }
NeilBrown2699b672011-07-28 11:31:47 +10002788
NeilBrowne6910632008-02-06 01:39:51 -08002789 sync_sbs(mddev, nospares);
NeilBrown85572d72014-12-15 12:56:56 +11002790 spin_unlock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791
NeilBrown36a4e1f2011-10-07 14:23:17 +11002792 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2793 mdname(mddev), mddev->in_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794
Shaohua Li504634f2016-11-18 09:44:08 -08002795 if (mddev->queue)
2796 blk_add_trace_msg(mddev->queue, "md md_update_sb");
NeilBrown46533ff2016-11-18 16:16:11 +11002797rewrite:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002798 md_bitmap_update_sb(mddev->bitmap);
NeilBrowndafb20f2012-03-19 12:46:39 +11002799 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 char b[BDEVNAME_SIZE];
NeilBrown36a4e1f2011-10-07 14:23:17 +11002801
NeilBrown42543762006-06-26 00:27:57 -07002802 if (rdev->sb_loaded != 1)
2803 continue; /* no noise on spare devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
NeilBrownf4667222013-12-09 12:04:56 +11002805 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown7bfa19f2005-06-21 17:17:28 -07002806 md_super_write(mddev,rdev,
Andre Noll0f420352008-07-11 22:02:23 +10002807 rdev->sb_start, rdev->sb_size,
NeilBrown7bfa19f2005-06-21 17:17:28 -07002808 rdev->sb_page);
NeilBrown36a4e1f2011-10-07 14:23:17 +11002809 pr_debug("md: (write) %s's sb offset: %llu\n",
2810 bdevname(rdev->bdev, b),
2811 (unsigned long long)rdev->sb_start);
NeilBrown42543762006-06-26 00:27:57 -07002812 rdev->sb_events = mddev->events;
NeilBrown2699b672011-07-28 11:31:47 +10002813 if (rdev->badblocks.size) {
2814 md_super_write(mddev, rdev,
2815 rdev->badblocks.sector,
2816 rdev->badblocks.size << 9,
2817 rdev->bb_page);
2818 rdev->badblocks.size = 0;
2819 }
NeilBrown7bfa19f2005-06-21 17:17:28 -07002820
NeilBrownf4667222013-12-09 12:04:56 +11002821 } else
NeilBrown36a4e1f2011-10-07 14:23:17 +11002822 pr_debug("md: %s (skipping faulty)\n",
2823 bdevname(rdev->bdev, b));
Andrei Warkentind70ed2e2011-10-18 12:16:48 +11002824
NeilBrown7bfa19f2005-06-21 17:17:28 -07002825 if (mddev->level == LEVEL_MULTIPATH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 /* only need to write one superblock... */
2827 break;
2828 }
NeilBrown46533ff2016-11-18 16:16:11 +11002829 if (md_super_wait(mddev) < 0)
2830 goto rewrite;
Shaohua Li29530792016-12-08 15:48:19 -08002831 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
NeilBrown7bfa19f2005-06-21 17:17:28 -07002832
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002833 if (mddev_is_clustered(mddev) && ret == 0)
2834 md_cluster_ops->metadata_update_finish(mddev);
2835
NeilBrown850b2b422006-10-03 01:15:46 -07002836 if (mddev->in_sync != sync_req ||
Shaohua Li29530792016-12-08 15:48:19 -08002837 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2838 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
NeilBrown06d91a52005-06-21 17:17:12 -07002839 /* have to write it out again */
NeilBrown06d91a52005-06-21 17:17:12 -07002840 goto repeat;
NeilBrown3d310eb2005-06-21 17:17:26 -07002841 wake_up(&mddev->sb_wait);
NeilBrownacb180b2009-04-14 16:28:34 +10002842 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
Junxiao Bie1a86db2020-07-14 16:10:26 -07002843 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown06d91a52005-06-21 17:17:12 -07002844
NeilBrowndafb20f2012-03-19 12:46:39 +11002845 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002846 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2847 clear_bit(Blocked, &rdev->flags);
2848
2849 if (any_badblocks_changed)
Vishal Vermafc974ee2015-12-24 19:20:34 -07002850 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002851 clear_bit(BlockedBadBlocks, &rdev->flags);
2852 wake_up(&rdev->blocked_wait);
2853 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854}
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002855EXPORT_SYMBOL(md_update_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002857static int add_bound_rdev(struct md_rdev *rdev)
2858{
2859 struct mddev *mddev = rdev->mddev;
2860 int err = 0;
Shaohua Li87d4d912016-01-06 14:37:14 -08002861 bool add_journal = test_bit(Journal, &rdev->flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002862
Shaohua Li87d4d912016-01-06 14:37:14 -08002863 if (!mddev->pers->hot_remove_disk || add_journal) {
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002864 /* If there is hot_add_disk but no hot_remove_disk
2865		 * then newly added disks are for geometry changes,
2866 * and should be added immediately.
2867 */
2868 super_types[mddev->major_version].
2869 validate_super(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002870 if (add_journal)
2871 mddev_suspend(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002872 err = mddev->pers->hot_add_disk(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002873 if (add_journal)
2874 mddev_resume(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002875 if (err) {
Guoqing Jiangdb767672016-06-02 23:32:05 -04002876 md_kick_rdev_from_array(rdev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002877 return err;
2878 }
2879 }
2880 sysfs_notify_dirent_safe(rdev->sysfs_state);
2881
Shaohua Li29530792016-12-08 15:48:19 -08002882 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002883 if (mddev->degraded)
2884 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2885 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Guoqing Jiang54679482021-10-04 23:34:53 +08002886 md_new_event();
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002887 md_wakeup_thread(mddev->thread);
2888 return 0;
2889}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890
Andre Noll7f6ce762008-03-23 18:34:54 +01002891/* words written to sysfs files may, or may not, be \n terminated.
NeilBrownbce74da2006-01-06 00:20:41 -08002892 * We want to accept either case. For this we use cmd_match.
2893 */
2894static int cmd_match(const char *cmd, const char *str)
2895{
2896 /* See if cmd, written into a sysfs file, matches
2897 * str. They must either be the same, or cmd can
2898 * have a trailing newline
2899 */
2900 while (*cmd && *str && *cmd == *str) {
2901 cmd++;
2902 str++;
2903 }
2904 if (*cmd == '\n')
2905 cmd++;
2906 if (*str || *cmd)
2907 return 0;
2908 return 1;
2909}
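/*
 * Illustrative only: cmd_match() accepts an exact match or a match
 * with one trailing newline, since sysfs writes usually include one.
 */
static void __maybe_unused cmd_match_sketch(void)
{
	WARN_ON(!cmd_match("remove\n", "remove"));	/* trailing \n matches */
	WARN_ON(!cmd_match("remove", "remove"));	/* exact match */
	WARN_ON(cmd_match("removed", "remove"));	/* extra chars do not */
}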
2910
NeilBrown86e6ffd2005-11-08 21:39:24 -08002911struct rdev_sysfs_entry {
2912 struct attribute attr;
NeilBrown3cb03002011-10-11 16:45:26 +11002913 ssize_t (*show)(struct md_rdev *, char *);
2914 ssize_t (*store)(struct md_rdev *, const char *, size_t);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002915};
2916
2917static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002918state_show(struct md_rdev *rdev, char *page)
NeilBrown86e6ffd2005-11-08 21:39:24 -08002919{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002920 char *sep = ",";
NeilBrown20a49ff2008-02-06 01:39:57 -08002921 size_t len = 0;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002922 unsigned long flags = READ_ONCE(rdev->flags);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002923
NeilBrown758bfc82014-12-15 12:56:59 +11002924 if (test_bit(Faulty, &flags) ||
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02002925 (!test_bit(ExternalBbl, &flags) &&
2926 rdev->badblocks.unacked_exist))
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002927 len += sprintf(page+len, "faulty%s", sep);
2928 if (test_bit(In_sync, &flags))
2929 len += sprintf(page+len, "in_sync%s", sep);
2930 if (test_bit(Journal, &flags))
2931 len += sprintf(page+len, "journal%s", sep);
2932 if (test_bit(WriteMostly, &flags))
2933 len += sprintf(page+len, "write_mostly%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002934 if (test_bit(Blocked, &flags) ||
NeilBrown52c64152011-12-08 16:22:48 +11002935 (rdev->badblocks.unacked_exist
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002936 && !test_bit(Faulty, &flags)))
2937 len += sprintf(page+len, "blocked%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002938 if (!test_bit(Faulty, &flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002939 !test_bit(Journal, &flags) &&
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002940 !test_bit(In_sync, &flags))
2941 len += sprintf(page+len, "spare%s", sep);
2942 if (test_bit(WriteErrorSeen, &flags))
2943 len += sprintf(page+len, "write_error%s", sep);
2944 if (test_bit(WantReplacement, &flags))
2945 len += sprintf(page+len, "want_replacement%s", sep);
2946 if (test_bit(Replacement, &flags))
2947 len += sprintf(page+len, "replacement%s", sep);
2948 if (test_bit(ExternalBbl, &flags))
2949 len += sprintf(page+len, "external_bbl%s", sep);
NeilBrown688834e2016-11-18 16:16:11 +11002950 if (test_bit(FailFast, &flags))
2951 len += sprintf(page+len, "failfast%s", sep);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002952
2953 if (len)
2954 len -= strlen(sep);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002955
NeilBrown86e6ffd2005-11-08 21:39:24 -08002956 return len+sprintf(page+len, "\n");
2957}
2958
NeilBrown45dc2de2006-06-26 00:27:58 -07002959static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002960state_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown45dc2de2006-06-26 00:27:58 -07002961{
2962 /* can write
NeilBrownde393cd2011-07-28 11:31:48 +10002963 * faulty - simulates an error
NeilBrown45dc2de2006-06-26 00:27:58 -07002964 * remove - disconnects the device
NeilBrownf6556752006-06-26 00:28:01 -07002965 * writemostly - sets write_mostly
2966 * -writemostly - clears write_mostly
NeilBrownde393cd2011-07-28 11:31:48 +10002967	 * blocked - sets the Blocked flag
2968 * -blocked - clears the Blocked and possibly simulates an error
NeilBrown6d56e272009-04-14 12:01:57 +10002969	 * insync - sets In_sync provided the device isn't active
NeilBrownf4667222013-12-09 12:04:56 +11002970 * -insync - clear Insync for a device with a slot assigned,
2971 * so that it gets rebuilt based on bitmap
NeilBrownd7a9d442011-07-28 11:31:48 +10002972 * write_error - sets WriteErrorSeen
2973 * -write_error - clears WriteErrorSeen
NeilBrown688834e2016-11-18 16:16:11 +11002974 * {,-}failfast - set/clear FailFast
NeilBrown45dc2de2006-06-26 00:27:58 -07002975 */
Xiao Ni8b9e2292021-10-13 22:59:33 +08002976
2977 struct mddev *mddev = rdev->mddev;
NeilBrown45dc2de2006-06-26 00:27:58 -07002978 int err = -EINVAL;
Xiao Ni8b9e2292021-10-13 22:59:33 +08002979 bool need_update_sb = false;
2980
NeilBrown45dc2de2006-06-26 00:27:58 -07002981 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2982 md_error(rdev->mddev, rdev);
NeilBrown5ef56c82011-08-25 14:42:51 +10002983 if (test_bit(Faulty, &rdev->flags))
2984 err = 0;
2985 else
2986 err = -EBUSY;
NeilBrown45dc2de2006-06-26 00:27:58 -07002987 } else if (cmd_match(buf, "remove")) {
Shaohua Li5d881782016-07-28 09:06:34 -07002988 if (rdev->mddev->pers) {
2989 clear_bit(Blocked, &rdev->flags);
2990 remove_and_add_spares(rdev->mddev, rdev);
2991 }
NeilBrown45dc2de2006-06-26 00:27:58 -07002992 if (rdev->raid_disk >= 0)
2993 err = -EBUSY;
2994 else {
NeilBrown45dc2de2006-06-26 00:27:58 -07002995 err = 0;
Guoqing Jianga9720902015-10-12 17:21:27 +08002996 if (mddev_is_clustered(mddev))
2997 err = md_cluster_ops->remove_disk(mddev, rdev);
2998
2999 if (err == 0) {
3000 md_kick_rdev_from_array(rdev);
NeilBrown060b0682016-11-04 16:46:03 +11003001 if (mddev->pers) {
Shaohua Li29530792016-12-08 15:48:19 -08003002 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11003003 md_wakeup_thread(mddev->thread);
3004 }
Guoqing Jiang54679482021-10-04 23:34:53 +08003005 md_new_event();
Guoqing Jianga9720902015-10-12 17:21:27 +08003006 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003007 }
NeilBrownf6556752006-06-26 00:28:01 -07003008 } else if (cmd_match(buf, "writemostly")) {
3009 set_bit(WriteMostly, &rdev->flags);
Guoqing Jiang404659c2019-12-23 10:48:53 +01003010 mddev_create_serial_pool(rdev->mddev, rdev, false);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003011 need_update_sb = true;
NeilBrownf6556752006-06-26 00:28:01 -07003012 err = 0;
3013 } else if (cmd_match(buf, "-writemostly")) {
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01003014 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003015 clear_bit(WriteMostly, &rdev->flags);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003016 need_update_sb = true;
NeilBrownf6556752006-06-26 00:28:01 -07003017 err = 0;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003018 } else if (cmd_match(buf, "blocked")) {
3019 set_bit(Blocked, &rdev->flags);
3020 err = 0;
3021 } else if (cmd_match(buf, "-blocked")) {
NeilBrownde393cd2011-07-28 11:31:48 +10003022 if (!test_bit(Faulty, &rdev->flags) &&
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02003023 !test_bit(ExternalBbl, &rdev->flags) &&
NeilBrown7da64a02011-08-30 16:20:17 +10003024 rdev->badblocks.unacked_exist) {
NeilBrownde393cd2011-07-28 11:31:48 +10003025 /* metadata handler doesn't understand badblocks,
3026 * so we need to fail the device
3027 */
3028 md_error(rdev->mddev, rdev);
3029 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003030 clear_bit(Blocked, &rdev->flags);
NeilBrownde393cd2011-07-28 11:31:48 +10003031 clear_bit(BlockedBadBlocks, &rdev->flags);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003032 wake_up(&rdev->blocked_wait);
3033 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3034 md_wakeup_thread(rdev->mddev->thread);
3035
3036 err = 0;
NeilBrown6d56e272009-04-14 12:01:57 +10003037 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3038 set_bit(In_sync, &rdev->flags);
3039 err = 0;
NeilBrown688834e2016-11-18 16:16:11 +11003040 } else if (cmd_match(buf, "failfast")) {
3041 set_bit(FailFast, &rdev->flags);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003042 need_update_sb = true;
NeilBrown688834e2016-11-18 16:16:11 +11003043 err = 0;
3044 } else if (cmd_match(buf, "-failfast")) {
3045 clear_bit(FailFast, &rdev->flags);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003046 need_update_sb = true;
NeilBrown688834e2016-11-18 16:16:11 +11003047 err = 0;
Shaohua Lif2076e72015-10-08 21:54:12 -07003048 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3049 !test_bit(Journal, &rdev->flags)) {
NeilBrowne1960f82014-09-30 15:24:25 +10003050 if (rdev->mddev->pers == NULL) {
3051 clear_bit(In_sync, &rdev->flags);
3052 rdev->saved_raid_disk = rdev->raid_disk;
3053 rdev->raid_disk = -1;
3054 err = 0;
3055 }
NeilBrownd7a9d442011-07-28 11:31:48 +10003056 } else if (cmd_match(buf, "write_error")) {
3057 set_bit(WriteErrorSeen, &rdev->flags);
3058 err = 0;
3059 } else if (cmd_match(buf, "-write_error")) {
3060 clear_bit(WriteErrorSeen, &rdev->flags);
3061 err = 0;
NeilBrown2d78f8c2011-12-23 10:17:51 +11003062 } else if (cmd_match(buf, "want_replacement")) {
3063 /* Any non-spare device that is not a replacement can
3064 * become want_replacement at any time, but we then need to
3065 * check if recovery is needed.
3066 */
3067 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07003068 !test_bit(Journal, &rdev->flags) &&
NeilBrown2d78f8c2011-12-23 10:17:51 +11003069 !test_bit(Replacement, &rdev->flags))
3070 set_bit(WantReplacement, &rdev->flags);
3071 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3072 md_wakeup_thread(rdev->mddev->thread);
3073 err = 0;
3074 } else if (cmd_match(buf, "-want_replacement")) {
3075 /* Clearing 'want_replacement' is always allowed.
3076 * Once replacements starts it is too late though.
3077 */
3078 err = 0;
3079 clear_bit(WantReplacement, &rdev->flags);
3080 } else if (cmd_match(buf, "replacement")) {
3081 /* Can only set a device as a replacement when array has not
3082 * yet been started. Once running, replacement is automatic
3083 * from spares, or by assigning 'slot'.
3084 */
3085 if (rdev->mddev->pers)
3086 err = -EBUSY;
3087 else {
3088 set_bit(Replacement, &rdev->flags);
3089 err = 0;
3090 }
3091 } else if (cmd_match(buf, "-replacement")) {
3092 /* Similarly, can only clear Replacement before start */
3093 if (rdev->mddev->pers)
3094 err = -EBUSY;
3095 else {
3096 clear_bit(Replacement, &rdev->flags);
3097 err = 0;
3098 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003099 } else if (cmd_match(buf, "re-add")) {
Yufen Yuee37e622019-04-02 14:22:14 +08003100 if (!rdev->mddev->pers)
3101 err = -EINVAL;
3102 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3103 rdev->saved_raid_disk >= 0) {
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05003104 /* clear_bit is performed _after_ all the devices
3105 * have their local Faulty bit cleared. If any writes
3106 * happen in the meantime in the local node, they
3107 * will land in the local bitmap, which will be synced
3108 * by this node eventually
3109 */
3110 if (!mddev_is_clustered(rdev->mddev) ||
3111 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3112 clear_bit(Faulty, &rdev->flags);
3113 err = add_bound_rdev(rdev);
3114 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003115 } else
3116 err = -EBUSY;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02003117 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3118 set_bit(ExternalBbl, &rdev->flags);
3119 rdev->badblocks.shift = 0;
3120 err = 0;
3121 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3122 clear_bit(ExternalBbl, &rdev->flags);
3123 err = 0;
NeilBrown45dc2de2006-06-26 00:27:58 -07003124 }
Xiao Ni8b9e2292021-10-13 22:59:33 +08003125 if (need_update_sb)
3126 md_update_sb(mddev, 1);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003127 if (!err)
3128 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrown45dc2de2006-06-26 00:27:58 -07003129 return err ? err : len;
3130}
NeilBrown80ca3a42006-07-10 04:44:18 -07003131static struct rdev_sysfs_entry rdev_state =
NeilBrown750f1992014-09-30 08:53:05 +10003132__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003133
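/*
 * Illustrative sysfs usage from userspace (device names assumed, not
 * taken from this file); the keywords are those parsed by state_store()
 * above:
 *
 *	echo want_replacement > /sys/block/md0/md/dev-sdb1/state
 *	echo -writemostly > /sys/block/md0/md/dev-sdb1/state
 */
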
3134static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003135errors_show(struct md_rdev *rdev, char *page)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003136{
3137 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3138}
3139
3140static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003141errors_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003142{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003143 unsigned int n;
3144 int rv;
3145
3146 rv = kstrtouint(buf, 10, &n);
3147 if (rv < 0)
3148 return rv;
3149 atomic_set(&rdev->corrected_errors, n);
3150 return len;
NeilBrown4dbcdc72006-01-06 00:20:52 -08003151}
3152static struct rdev_sysfs_entry rdev_errors =
NeilBrown80ca3a42006-07-10 04:44:18 -07003153__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
NeilBrown4dbcdc72006-01-06 00:20:52 -08003154
NeilBrown014236d2006-01-06 00:20:55 -08003155static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003156slot_show(struct md_rdev *rdev, char *page)
NeilBrown014236d2006-01-06 00:20:55 -08003157{
Shaohua Lif2076e72015-10-08 21:54:12 -07003158 if (test_bit(Journal, &rdev->flags))
3159 return sprintf(page, "journal\n");
3160 else if (rdev->raid_disk < 0)
NeilBrown014236d2006-01-06 00:20:55 -08003161 return sprintf(page, "none\n");
3162 else
3163 return sprintf(page, "%d\n", rdev->raid_disk);
3164}
3165
3166static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003167slot_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown014236d2006-01-06 00:20:55 -08003168{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003169 int slot;
NeilBrownc303da62008-02-06 01:39:51 -08003170 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003171
Shaohua Lif2076e72015-10-08 21:54:12 -07003172 if (test_bit(Journal, &rdev->flags))
3173 return -EBUSY;
NeilBrown014236d2006-01-06 00:20:55 -08003174 if (strncmp(buf, "none", 4)==0)
3175 slot = -1;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003176 else {
3177 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3178 if (err < 0)
3179 return err;
3180 }
Neil Brown6c2fce22008-06-28 08:31:31 +10003181 if (rdev->mddev->pers && slot == -1) {
NeilBrownc303da62008-02-06 01:39:51 -08003182 /* Setting 'slot' on an active array requires also
3183 * updating the 'rd%d' link, and communicating
3184	 * with the personality via ->hot_*_disk.
3185 * For now we only support removing
3186 * failed/spare devices. This normally happens automatically,
3187 * but not when the metadata is externally managed.
3188 */
NeilBrownc303da62008-02-06 01:39:51 -08003189 if (rdev->raid_disk == -1)
3190 return -EEXIST;
3191 /* personality does all needed checks */
Namhyung Kim01393f32011-06-09 11:42:54 +10003192 if (rdev->mddev->pers->hot_remove_disk == NULL)
NeilBrownc303da62008-02-06 01:39:51 -08003193 return -EINVAL;
NeilBrown746d3202013-04-24 11:42:41 +10003194 clear_bit(Blocked, &rdev->flags);
3195 remove_and_add_spares(rdev->mddev, rdev);
3196 if (rdev->raid_disk >= 0)
3197 return -EBUSY;
NeilBrownc303da62008-02-06 01:39:51 -08003198 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3199 md_wakeup_thread(rdev->mddev->thread);
Neil Brown6c2fce22008-06-28 08:31:31 +10003200 } else if (rdev->mddev->pers) {
Neil Brown6c2fce22008-06-28 08:31:31 +10003201 /* Activating a spare .. or possibly reactivating
NeilBrown6d56e272009-04-14 12:01:57 +10003202 * if we ever get bitmaps working here.
Neil Brown6c2fce22008-06-28 08:31:31 +10003203 */
Neil Brown6c2fce22008-06-28 08:31:31 +10003205
3206 if (rdev->raid_disk != -1)
3207 return -EBUSY;
3208
NeilBrownc6751b22011-02-02 11:57:13 +11003209 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3210 return -EBUSY;
3211
Neil Brown6c2fce22008-06-28 08:31:31 +10003212 if (rdev->mddev->pers->hot_add_disk == NULL)
3213 return -EINVAL;
3214
NeilBrownba1b41b2011-01-14 09:14:34 +11003215 if (slot >= rdev->mddev->raid_disks &&
3216 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3217 return -ENOSPC;
3218
Neil Brown6c2fce22008-06-28 08:31:31 +10003219 rdev->raid_disk = slot;
3220 if (test_bit(In_sync, &rdev->flags))
3221 rdev->saved_raid_disk = slot;
3222 else
3223 rdev->saved_raid_disk = -1;
NeilBrownd30519f2011-10-18 12:13:47 +11003224 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11003225 clear_bit(Bitmap_sync, &rdev->flags);
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02003226 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003227 if (err) {
3228 rdev->raid_disk = -1;
3229 return err;
3230 } else
3231 sysfs_notify_dirent_safe(rdev->sysfs_state);
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09003232		/* failure here is OK */
3233		sysfs_link_rdev(rdev->mddev, rdev);
Neil Brown6c2fce22008-06-28 08:31:31 +10003234 /* don't wakeup anyone, leave that to userspace. */
NeilBrownc303da62008-02-06 01:39:51 -08003235 } else {
NeilBrownba1b41b2011-01-14 09:14:34 +11003236 if (slot >= rdev->mddev->raid_disks &&
3237 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
NeilBrownc303da62008-02-06 01:39:51 -08003238 return -ENOSPC;
3239 rdev->raid_disk = slot;
3240 /* assume it is working */
NeilBrownc5d79ad2008-02-06 01:39:54 -08003241 clear_bit(Faulty, &rdev->flags);
3242 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc303da62008-02-06 01:39:51 -08003243 set_bit(In_sync, &rdev->flags);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003244 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrownc303da62008-02-06 01:39:51 -08003245 }
NeilBrown014236d2006-01-06 00:20:55 -08003246 return len;
3247}
3248
NeilBrown014236d2006-01-06 00:20:55 -08003249static struct rdev_sysfs_entry rdev_slot =
NeilBrown80ca3a42006-07-10 04:44:18 -07003250__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
NeilBrown014236d2006-01-06 00:20:55 -08003251
NeilBrown93c8cad2006-01-06 00:20:56 -08003252static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003253offset_show(struct md_rdev *rdev, char *page)
NeilBrown93c8cad2006-01-06 00:20:56 -08003254{
Andrew Morton6961ece2006-01-06 00:20:59 -08003255 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
NeilBrown93c8cad2006-01-06 00:20:56 -08003256}
3257
3258static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003259offset_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown93c8cad2006-01-06 00:20:56 -08003260{
NeilBrownc6563a82012-05-21 09:27:00 +10003261 unsigned long long offset;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003262 if (kstrtoull(buf, 10, &offset) < 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003263 return -EINVAL;
Neil Brown8ed0a522008-06-28 08:31:29 +10003264 if (rdev->mddev->pers && rdev->raid_disk >= 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003265 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003266 if (rdev->sectors && rdev->mddev->external)
NeilBrownc5d79ad2008-02-06 01:39:54 -08003267 /* Must set offset before size, so overlap checks
3268 * can be sane */
3269 return -EBUSY;
NeilBrown93c8cad2006-01-06 00:20:56 -08003270 rdev->data_offset = offset;
NeilBrown25f7fd42012-07-19 15:59:18 +10003271 rdev->new_data_offset = offset;
NeilBrown93c8cad2006-01-06 00:20:56 -08003272 return len;
3273}
3274
3275static struct rdev_sysfs_entry rdev_offset =
NeilBrown80ca3a42006-07-10 04:44:18 -07003276__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
NeilBrown93c8cad2006-01-06 00:20:56 -08003277
NeilBrownc6563a82012-05-21 09:27:00 +10003278static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3279{
3280 return sprintf(page, "%llu\n",
3281 (unsigned long long)rdev->new_data_offset);
3282}
3283
3284static ssize_t new_offset_store(struct md_rdev *rdev,
3285 const char *buf, size_t len)
3286{
3287 unsigned long long new_offset;
3288 struct mddev *mddev = rdev->mddev;
3289
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003290 if (kstrtoull(buf, 10, &new_offset) < 0)
NeilBrownc6563a82012-05-21 09:27:00 +10003291 return -EINVAL;
3292
NeilBrownf851b602014-12-11 10:02:10 +11003293 if (mddev->sync_thread ||
3294 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
NeilBrownc6563a82012-05-21 09:27:00 +10003295 return -EBUSY;
3296 if (new_offset == rdev->data_offset)
3297 /* reset is always permitted */
3298 ;
3299 else if (new_offset > rdev->data_offset) {
3300 /* must not push array size beyond rdev_sectors */
3301 if (new_offset - rdev->data_offset
3302 + mddev->dev_sectors > rdev->sectors)
3303 return -E2BIG;
3304 }
3305 /* Metadata worries about other space details. */
3306
3307 /* decreasing the offset is inconsistent with a backwards
3308 * reshape.
3309 */
3310 if (new_offset < rdev->data_offset &&
3311 mddev->reshape_backwards)
3312 return -EINVAL;
3313 /* Increasing offset is inconsistent with forwards
3314 * reshape. reshape_direction should be set to
3315 * 'backwards' first.
3316 */
3317 if (new_offset > rdev->data_offset &&
3318 !mddev->reshape_backwards)
3319 return -EINVAL;
3320
3321 if (mddev->pers && mddev->persistent &&
3322 !super_types[mddev->major_version]
3323 .allow_new_offset(rdev, new_offset))
3324 return -E2BIG;
3325 rdev->new_data_offset = new_offset;
3326 if (new_offset > rdev->data_offset)
3327 mddev->reshape_backwards = 1;
3328 else if (new_offset < rdev->data_offset)
3329 mddev->reshape_backwards = 0;
3330
3331 return len;
3332}
3333static struct rdev_sysfs_entry rdev_new_offset =
3334__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3335
NeilBrown83303b62006-01-06 00:21:06 -08003336static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003337rdev_size_show(struct md_rdev *rdev, char *page)
NeilBrown83303b62006-01-06 00:21:06 -08003338{
Andre Nolldd8ac332009-03-31 14:33:13 +11003339 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
NeilBrown83303b62006-01-06 00:21:06 -08003340}
3341
NeilBrownc5d79ad2008-02-06 01:39:54 -08003342static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3343{
3344 /* check if two start/length pairs overlap */
3345 if (s1+l1 <= s2)
3346 return 0;
3347 if (s2+l2 <= s1)
3348 return 0;
3349 return 1;
3350}
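/*
 * Illustrative only: overlaps() treats its arguments as half-open
 * ranges [start, start + len), so merely adjacent ranges do not count.
 */
static void __maybe_unused overlaps_sketch(void)
{
	WARN_ON(!overlaps(0, 100, 50, 100));	/* [0,100) meets [50,150) */
	WARN_ON(overlaps(0, 100, 100, 50));	/* adjacent, no overlap */
}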
3351
Dan Williamsb522adc2009-03-31 15:00:31 +11003352static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3353{
3354 unsigned long long blocks;
3355 sector_t new;
3356
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003357 if (kstrtoull(buf, 10, &blocks) < 0)
Dan Williamsb522adc2009-03-31 15:00:31 +11003358 return -EINVAL;
3359
3360 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3361 return -EINVAL; /* sector conversion overflow */
3362
3363 new = blocks * 2;
3364 if (new != blocks * 2)
3365 return -EINVAL; /* unsigned long long to sector_t overflow */
3366
3367 *sectors = new;
3368 return 0;
3369}
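/*
 * Illustrative only: sysfs sizes are given in 1K blocks, so "4" maps
 * to 8 sectors of 512 bytes; inputs that would overflow sector_t are
 * rejected with -EINVAL.
 */
static void __maybe_unused strict_blocks_to_sectors_sketch(void)
{
	sector_t sectors;

	WARN_ON(strict_blocks_to_sectors("4", &sectors) || sectors != 8);
	WARN_ON(!strict_blocks_to_sectors("garbage", &sectors));
}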
3370
NeilBrown83303b62006-01-06 00:21:06 -08003371static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003372rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown83303b62006-01-06 00:21:06 -08003373{
NeilBrownfd01b882011-10-11 16:47:53 +11003374 struct mddev *my_mddev = rdev->mddev;
Andre Nolldd8ac332009-03-31 14:33:13 +11003375 sector_t oldsectors = rdev->sectors;
Dan Williamsb522adc2009-03-31 15:00:31 +11003376 sector_t sectors;
NeilBrown27c529b2008-03-04 14:29:33 -08003377
Shaohua Lif2076e72015-10-08 21:54:12 -07003378 if (test_bit(Journal, &rdev->flags))
3379 return -EBUSY;
Dan Williamsb522adc2009-03-31 15:00:31 +11003380 if (strict_blocks_to_sectors(buf, &sectors) < 0)
Neil Brownd7027452008-07-12 10:37:50 +10003381 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10003382 if (rdev->data_offset != rdev->new_data_offset)
3383 return -EINVAL; /* too confusing */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003384 if (my_mddev->pers && rdev->raid_disk >= 0) {
Neil Brownd7027452008-07-12 10:37:50 +10003385 if (my_mddev->persistent) {
Andre Nolldd8ac332009-03-31 14:33:13 +11003386 sectors = super_types[my_mddev->major_version].
3387 rdev_size_change(rdev, sectors);
3388 if (!sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10003389 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003390 } else if (!sectors)
Christoph Hellwig0fe80342021-10-18 12:11:06 +02003391 sectors = bdev_nr_sectors(rdev->bdev) -
Andre Nolldd8ac332009-03-31 14:33:13 +11003392 rdev->data_offset;
NeilBrowna6468532013-02-21 14:33:17 +11003393 if (!my_mddev->pers->resize)
3394 /* Cannot change size for RAID0 or Linear etc */
3395 return -EINVAL;
Chris Webb0cd17fe2008-06-28 08:31:46 +10003396 }
Andre Nolldd8ac332009-03-31 14:33:13 +11003397 if (sectors < my_mddev->dev_sectors)
Chris Webb7d3c6f82008-10-13 11:55:11 +11003398 return -EINVAL; /* component must fit device */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003399
Andre Nolldd8ac332009-03-31 14:33:13 +11003400 rdev->sectors = sectors;
3401 if (sectors > oldsectors && my_mddev->external) {
NeilBrown8b1afc32014-09-29 15:33:20 +10003402 /* Need to check that all other rdevs with the same
3403 * ->bdev do not overlap. 'rcu' is sufficient to walk
3404 * the rdev lists safely.
3405 * This check does not provide a hard guarantee, it
3406 * just helps avoid dangerous mistakes.
NeilBrownc5d79ad2008-02-06 01:39:54 -08003407 */
NeilBrownfd01b882011-10-11 16:47:53 +11003408 struct mddev *mddev;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003409 int overlap = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11003410 struct list_head *tmp;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003411
NeilBrown8b1afc32014-09-29 15:33:20 +10003412 rcu_read_lock();
NeilBrown29ac4aa2008-02-06 01:39:58 -08003413 for_each_mddev(mddev, tmp) {
NeilBrown3cb03002011-10-11 16:45:26 +11003414 struct md_rdev *rdev2;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003415
NeilBrowndafb20f2012-03-19 12:46:39 +11003416 rdev_for_each(rdev2, mddev)
NeilBrownf21e9ff2011-01-31 12:10:09 +11003417 if (rdev->bdev == rdev2->bdev &&
3418 rdev != rdev2 &&
3419 overlaps(rdev->data_offset, rdev->sectors,
3420 rdev2->data_offset,
3421 rdev2->sectors)) {
NeilBrownc5d79ad2008-02-06 01:39:54 -08003422 overlap = 1;
3423 break;
3424 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08003425 if (overlap) {
3426 mddev_put(mddev);
3427 break;
3428 }
3429 }
NeilBrown8b1afc32014-09-29 15:33:20 +10003430 rcu_read_unlock();
NeilBrownc5d79ad2008-02-06 01:39:54 -08003431 if (overlap) {
3432 /* Someone else could have slipped in a size
3433 * change here, but doing so is just silly.
Andre Nolldd8ac332009-03-31 14:33:13 +11003434 * We put oldsectors back because we *know* it is
NeilBrownc5d79ad2008-02-06 01:39:54 -08003435 * safe, and trust userspace not to race with
3436 * itself
3437 */
Andre Nolldd8ac332009-03-31 14:33:13 +11003438 rdev->sectors = oldsectors;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003439 return -EBUSY;
3440 }
3441 }
NeilBrown83303b62006-01-06 00:21:06 -08003442 return len;
3443}
3444
3445static struct rdev_sysfs_entry rdev_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07003446__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
NeilBrown83303b62006-01-06 00:21:06 -08003447
NeilBrown3cb03002011-10-11 16:45:26 +11003448static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
Dan Williams06e3c812009-12-12 21:17:12 -07003449{
3450 unsigned long long recovery_start = rdev->recovery_offset;
3451
3452 if (test_bit(In_sync, &rdev->flags) ||
3453 recovery_start == MaxSector)
3454 return sprintf(page, "none\n");
3455
3456 return sprintf(page, "%llu\n", recovery_start);
3457}
3458
NeilBrown3cb03002011-10-11 16:45:26 +11003459static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
Dan Williams06e3c812009-12-12 21:17:12 -07003460{
3461 unsigned long long recovery_start;
3462
3463 if (cmd_match(buf, "none"))
3464 recovery_start = MaxSector;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003465 else if (kstrtoull(buf, 10, &recovery_start))
Dan Williams06e3c812009-12-12 21:17:12 -07003466 return -EINVAL;
3467
3468 if (rdev->mddev->pers &&
3469 rdev->raid_disk >= 0)
3470 return -EBUSY;
3471
3472 rdev->recovery_offset = recovery_start;
3473 if (recovery_start == MaxSector)
3474 set_bit(In_sync, &rdev->flags);
3475 else
3476 clear_bit(In_sync, &rdev->flags);
3477 return len;
3478}
3479
3480static struct rdev_sysfs_entry rdev_recovery_start =
3481__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3482
Vishal Vermafc974ee2015-12-24 19:20:34 -07003483/* sysfs access to bad-blocks list.
3484 * We present two files.
3485 * 'bad_blocks' lists sector numbers and lengths of ranges that
3486 * are recorded as bad. The list is truncated to fit within
3487 * the one-page limit of sysfs.
3488 * Writing "sector length" to this file adds an acknowledged
3489 * bad-block range to the list.
3490 * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3491 * been acknowledged. Writing to this file adds bad blocks
3492 * without acknowledging them. This is largely for testing.
3493 */
NeilBrown3cb03002011-10-11 16:45:26 +11003494static ssize_t bb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003495{
3496 return badblocks_show(&rdev->badblocks, page, 0);
3497}
NeilBrown3cb03002011-10-11 16:45:26 +11003498static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003499{
NeilBrownde393cd2011-07-28 11:31:48 +10003500 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3501 /* Maybe that ack was all we needed */
3502 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3503 wake_up(&rdev->blocked_wait);
3504 return rv;
NeilBrown16c791a2011-07-28 11:31:47 +10003505}
3506static struct rdev_sysfs_entry rdev_bad_blocks =
3507__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3508
NeilBrown3cb03002011-10-11 16:45:26 +11003509static ssize_t ubb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003510{
3511 return badblocks_show(&rdev->badblocks, page, 1);
3512}
NeilBrown3cb03002011-10-11 16:45:26 +11003513static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003514{
3515 return badblocks_store(&rdev->badblocks, page, len, 1);
3516}
3517static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3518__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3519
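/*
 * Example: feeding the bad_blocks file from userspace.  rdev attributes
 * appear under /sys/block/<md>/md/dev-<disk>/, and bb_store() above parses
 * a "sector length" pair into an acknowledged bad range.  The array and
 * device names here are illustrative; this is a sketch, not a tool.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/md0/md/dev-sda1/bad_blocks";
	const char *entry = "2048 16\n";	/* 16 bad sectors at 2048 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, entry, strlen(entry)) < 0)
		perror("write");		/* e.g. EINVAL on bad format */
	close(fd);
	return 0;
}
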
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003520static ssize_t
3521ppl_sector_show(struct md_rdev *rdev, char *page)
3522{
3523 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3524}
3525
3526static ssize_t
3527ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3528{
3529 unsigned long long sector;
3530
3531 if (kstrtoull(buf, 10, &sector) < 0)
3532 return -EINVAL;
3533 if (sector != (sector_t)sector)
3534 return -EINVAL;
3535
3536 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3537 rdev->raid_disk >= 0)
3538 return -EBUSY;
3539
3540 if (rdev->mddev->persistent) {
3541 if (rdev->mddev->major_version == 0)
3542 return -EINVAL;
3543 if ((sector > rdev->sb_start &&
3544 sector - rdev->sb_start > S16_MAX) ||
3545 (sector < rdev->sb_start &&
3546 rdev->sb_start - sector > -S16_MIN))
3547 return -EINVAL;
3548 rdev->ppl.offset = sector - rdev->sb_start;
3549 } else if (!rdev->mddev->external) {
3550 return -EBUSY;
3551 }
3552 rdev->ppl.sector = sector;
3553 return len;
3554}
3555
3556static struct rdev_sysfs_entry rdev_ppl_sector =
3557__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3558
3559static ssize_t
3560ppl_size_show(struct md_rdev *rdev, char *page)
3561{
3562 return sprintf(page, "%u\n", rdev->ppl.size);
3563}
3564
3565static ssize_t
3566ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3567{
3568 unsigned int size;
3569
3570 if (kstrtouint(buf, 10, &size) < 0)
3571 return -EINVAL;
3572
3573 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3574 rdev->raid_disk >= 0)
3575 return -EBUSY;
3576
3577 if (rdev->mddev->persistent) {
3578 if (rdev->mddev->major_version == 0)
3579 return -EINVAL;
3580 if (size > U16_MAX)
3581 return -EINVAL;
3582 } else if (!rdev->mddev->external) {
3583 return -EBUSY;
3584 }
3585 rdev->ppl.size = size;
3586 return len;
3587}
3588
3589static struct rdev_sysfs_entry rdev_ppl_size =
3590__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3591
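/*
 * Example: the PPL placement constraint enforced by ppl_sector_store()
 * above, restated as a userspace sketch.  With v1.x metadata the PPL
 * offset is kept as a signed 16-bit sector count relative to sb_start,
 * so the chosen sector must lie within [sb_start - 32768, sb_start + 32767].
 * The numbers in main() are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ppl_sector_valid(uint64_t sector, uint64_t sb_start)
{
	if (sector >= sb_start)
		return sector - sb_start <= INT16_MAX;	/* S16_MAX == 32767 */
	return sb_start - sector <= 32768;		/* -S16_MIN == 32768 */
}

int main(void)
{
	/* superblock at sector 8: sectors up to 32775 are representable */
	printf("%d %d\n", ppl_sector_valid(32775, 8),	/* 1 */
	       ppl_sector_valid(32776, 8));		/* 0 */
	return 0;
}
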
NeilBrown86e6ffd2005-11-08 21:39:24 -08003592static struct attribute *rdev_default_attrs[] = {
3593 &rdev_state.attr,
NeilBrown4dbcdc72006-01-06 00:20:52 -08003594 &rdev_errors.attr,
NeilBrown014236d2006-01-06 00:20:55 -08003595 &rdev_slot.attr,
NeilBrown93c8cad2006-01-06 00:20:56 -08003596 &rdev_offset.attr,
NeilBrownc6563a82012-05-21 09:27:00 +10003597 &rdev_new_offset.attr,
NeilBrown83303b62006-01-06 00:21:06 -08003598 &rdev_size.attr,
Dan Williams06e3c812009-12-12 21:17:12 -07003599 &rdev_recovery_start.attr,
NeilBrown16c791a2011-07-28 11:31:47 +10003600 &rdev_bad_blocks.attr,
3601 &rdev_unack_bad_blocks.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003602 &rdev_ppl_sector.attr,
3603 &rdev_ppl_size.attr,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003604 NULL,
3605};
3606static ssize_t
3607rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3608{
3609 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003610 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003611
3612 if (!entry->show)
3613 return -EIO;
NeilBrown758bfc82014-12-15 12:56:59 +11003614 if (!rdev->mddev)
Marcos Paulo de Souza168b3052019-06-14 15:41:06 -07003615 return -ENODEV;
NeilBrown758bfc82014-12-15 12:56:59 +11003616 return entry->show(rdev, page);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003617}
3618
3619static ssize_t
3620rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3621 const char *page, size_t length)
3622{
3623 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003624 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown27c529b2008-03-04 14:29:33 -08003625 ssize_t rv;
NeilBrownfd01b882011-10-11 16:47:53 +11003626 struct mddev *mddev = rdev->mddev;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003627
3628 if (!entry->store)
3629 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07003630 if (!capable(CAP_SYS_ADMIN))
3631 return -EACCES;
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003632 rv = mddev ? mddev_lock(mddev) : -ENODEV;
NeilBrownca388052008-02-06 01:39:55 -08003633 if (!rv) {
NeilBrown27c529b2008-03-04 14:29:33 -08003634 if (rdev->mddev == NULL)
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003635 rv = -ENODEV;
NeilBrown27c529b2008-03-04 14:29:33 -08003636 else
3637 rv = entry->store(rdev, page, length);
Dan Williams6a518302008-04-30 00:52:28 -07003638 mddev_unlock(mddev);
NeilBrownca388052008-02-06 01:39:55 -08003639 }
3640 return rv;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003641}
3642
3643static void rdev_free(struct kobject *ko)
3644{
NeilBrown3cb03002011-10-11 16:45:26 +11003645 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003646 kfree(rdev);
3647}
Emese Revfy52cf25d2010-01-19 02:58:23 +01003648static const struct sysfs_ops rdev_sysfs_ops = {
NeilBrown86e6ffd2005-11-08 21:39:24 -08003649 .show = rdev_attr_show,
3650 .store = rdev_attr_store,
3651};
3652static struct kobj_type rdev_ktype = {
3653 .release = rdev_free,
3654 .sysfs_ops = &rdev_sysfs_ops,
3655 .default_attrs = rdev_default_attrs,
3656};
3657
NeilBrown3cb03002011-10-11 16:45:26 +11003658int md_rdev_init(struct md_rdev *rdev)
NeilBrowne8bb9a82010-06-01 19:37:26 +10003659{
3660 rdev->desc_nr = -1;
3661 rdev->saved_raid_disk = -1;
3662 rdev->raid_disk = -1;
3663 rdev->flags = 0;
3664 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10003665 rdev->new_data_offset = 0;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003666 rdev->sb_events = 0;
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02003667 rdev->last_read_error = 0;
NeilBrown2699b672011-07-28 11:31:47 +10003668 rdev->sb_loaded = 0;
3669 rdev->bb_page = NULL;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003670 atomic_set(&rdev->nr_pending, 0);
3671 atomic_set(&rdev->read_errors, 0);
3672 atomic_set(&rdev->corrected_errors, 0);
3673
3674 INIT_LIST_HEAD(&rdev->same_set);
3675 init_waitqueue_head(&rdev->blocked_wait);
NeilBrown2230dfe2011-07-28 11:31:46 +10003676
3677 /* Add space to store bad block list.
3678 * This reserves the space even on arrays where it cannot
3679 * be used - I wonder if that matters
3680 */
Vishal Vermafc974ee2015-12-24 19:20:34 -07003681 return badblocks_init(&rdev->badblocks, 0);
NeilBrowne8bb9a82010-06-01 19:37:26 +10003682}
3683EXPORT_SYMBOL_GPL(md_rdev_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684/*
3685 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3686 *
3687 * mark the device faulty if:
3688 *
3689 * - the device is nonexistent (zero size)
3690 * - the device has no valid superblock
3691 *
3692 * a faulty rdev _never_ has rdev->sb set.
3693 */
NeilBrown3cb03002011-10-11 16:45:26 +11003694static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003695{
3696 char b[BDEVNAME_SIZE];
3697 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11003698 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 sector_t size;
3700
NeilBrown9ffae0c2006-01-06 00:20:32 -08003701 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
NeilBrown9d487392016-11-02 14:16:49 +11003702 if (!rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704
NeilBrown2230dfe2011-07-28 11:31:46 +10003705 err = md_rdev_init(rdev);
3706 if (err)
3707 goto abort_free;
3708 err = alloc_disk_sb(rdev);
3709 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 goto abort_free;
3711
NeilBrownc5d79ad2008-02-06 01:39:54 -08003712 err = lock_rdev(rdev, newdev, super_format == -2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713 if (err)
3714 goto abort_free;
3715
Greg Kroah-Hartmanf9cb0742007-12-17 23:05:35 -07003716 kobject_init(&rdev->kobj, &rdev_ktype);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003717
Christoph Hellwig0fe80342021-10-18 12:11:06 +02003718 size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 if (!size) {
NeilBrown9d487392016-11-02 14:16:49 +11003720 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721 bdevname(rdev->bdev,b));
3722 err = -EINVAL;
3723 goto abort_free;
3724 }
3725
3726 if (super_format >= 0) {
3727 err = super_types[super_format].
3728 load_super(rdev, NULL, super_minor);
3729 if (err == -EINVAL) {
NeilBrown9d487392016-11-02 14:16:49 +11003730 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
NeilBrowndf968c42007-07-17 04:06:11 -07003731 bdevname(rdev->bdev,b),
NeilBrown9d487392016-11-02 14:16:49 +11003732 super_format, super_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 goto abort_free;
3734 }
3735 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11003736 pr_warn("md: could not read %s's sb, not importing!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737 bdevname(rdev->bdev,b));
3738 goto abort_free;
3739 }
3740 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003741
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742 return rdev;
3743
3744abort_free:
NeilBrown2699b672011-07-28 11:31:47 +10003745 if (rdev->bdev)
3746 unlock_rdev(rdev);
NeilBrown545c8792012-05-22 13:54:30 +10003747 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 kfree(rdev);
3749 return ERR_PTR(err);
3750}
3751
3752/*
3753 * Check a full RAID array for plausibility
3754 */
3755
Yufen Yu6a5cb532019-10-16 16:00:03 +08003756static int analyze_sbs(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003757{
3758 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11003759 struct md_rdev *rdev, *freshest, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760 char b[BDEVNAME_SIZE];
3761
3762 freshest = NULL;
NeilBrowndafb20f2012-03-19 12:46:39 +11003763 rdev_for_each_safe(rdev, tmp, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 switch (super_types[mddev->major_version].
3765 load_super(rdev, freshest, mddev->minor_version)) {
3766 case 1:
3767 freshest = rdev;
3768 break;
3769 case 0:
3770 break;
3771 default:
NeilBrown9d487392016-11-02 14:16:49 +11003772 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003774 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003775 }
3776
Yufen Yu6a5cb532019-10-16 16:00:03 +08003777 /* Cannot find a valid fresh disk */
3778 if (!freshest) {
3779 pr_warn("md: cannot find a valid disk\n");
3780 return -EINVAL;
3781 }
3782
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783 super_types[mddev->major_version].
3784 validate_super(mddev, freshest);
3785
3786 i = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11003787 rdev_for_each_safe(rdev, tmp, mddev) {
NeilBrown233fca32010-04-14 17:02:09 +10003788 if (mddev->max_disks &&
3789 (rdev->desc_nr >= mddev->max_disks ||
3790 i > mddev->max_disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11003791 pr_warn("md: %s: %s: only %d devices permitted\n",
3792 mdname(mddev), bdevname(rdev->bdev, b),
3793 mddev->max_disks);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003794 md_kick_rdev_from_array(rdev);
NeilBrownde01dfa2009-02-06 18:02:46 +11003795 continue;
3796 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003797 if (rdev != freshest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798 if (super_types[mddev->major_version].
3799 validate_super(mddev, rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003800 pr_warn("md: kicking non-fresh %s from array!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003801 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003802 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 continue;
3804 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003805 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806 if (mddev->level == LEVEL_MULTIPATH) {
3807 rdev->desc_nr = i++;
3808 rdev->raid_disk = rdev->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08003809 set_bit(In_sync, &rdev->flags);
Shaohua Lif2076e72015-10-08 21:54:12 -07003810 } else if (rdev->raid_disk >=
3811 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3812 !test_bit(Journal, &rdev->flags)) {
NeilBrowna778b732007-05-23 13:58:10 -07003813 rdev->raid_disk = -1;
3814 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815 }
3816 }
Yufen Yu6a5cb532019-10-16 16:00:03 +08003817
3818 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819}
3820
NeilBrown72e02072009-12-14 12:49:55 +11003821/* Read a fixed-point number.
3822 * Numbers in sysfs attributes should be in "standard" units where
3823 * possible, so time should be in seconds.
NeilBrownf72ffdd2014-09-30 14:23:59 +10003824 * However we internally use a much smaller unit such as
NeilBrown72e02072009-12-14 12:49:55 +11003825 * milliseconds or jiffies.
3826 * This function takes a decimal number with a possible fractional
3827 * component, and produces an integer which is the result of
3828 * multiplying that number by 10^'scale', all without any
3829 * floating-point arithmetic.
3830 */
3831int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3832{
3833 unsigned long result = 0;
3834 long decimals = -1;
3835 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3836 if (*cp == '.')
3837 decimals = 0;
3838 else if (decimals < scale) {
3839 unsigned int value;
3840 value = *cp - '0';
3841 result = result * 10 + value;
3842 if (decimals >= 0)
3843 decimals++;
3844 }
3845 cp++;
3846 }
3847 if (*cp == '\n')
3848 cp++;
3849 if (*cp)
3850 return -EINVAL;
3851 if (decimals < 0)
3852 decimals = 0;
Andy Shevchenkocf891602019-07-23 23:41:55 +03003853 *res = result * int_pow(10, scale - decimals);
NeilBrown72e02072009-12-14 12:49:55 +11003854 return 0;
3855}
3856
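/*
 * Example: strict_strtoul_scaled() in a userspace sketch, with a worked
 * case.  Parsing "1.5" at scale 3 accumulates result = 15 with
 * decimals = 1, then multiplies by 10^(3-1) to yield 1500 -- 1.5 seconds
 * expressed in milliseconds -- using integer arithmetic throughout.
 */
#include <assert.h>
#include <ctype.h>

static int parse_scaled_demo(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;

	while (isdigit((unsigned char)*cp) || (*cp == '.' && decimals < 0)) {
		if (*cp == '.')
			decimals = 0;
		else if (decimals < scale) {	/* digits past 'scale' drop */
			result = result * 10 + (*cp - '0');
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -1;
	if (decimals < 0)
		decimals = 0;
	while (decimals < scale) {	/* result *= 10^(scale - decimals) */
		result *= 10;
		decimals++;
	}
	*res = result;
	return 0;
}

int main(void)
{
	unsigned long v;

	assert(parse_scaled_demo("1.5", &v, 3) == 0 && v == 1500);
	assert(parse_scaled_demo("0.2\n", &v, 3) == 0 && v == 200);
	return 0;
}
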
NeilBrowneae17012005-11-08 21:39:23 -08003857static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003858safe_delay_show(struct mddev *mddev, char *page)
NeilBrown16f17b32006-06-26 00:27:37 -07003859{
3860 int msec = (mddev->safemode_delay*1000)/HZ;
3861 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3862}
3863static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003864safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
NeilBrown16f17b32006-06-26 00:27:37 -07003865{
NeilBrown16f17b32006-06-26 00:27:37 -07003866 unsigned long msec;
Dan Williams97ce0a72008-09-24 22:48:19 -07003867
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003868 if (mddev_is_clustered(mddev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003869 pr_warn("md: Safemode is disabled for clustered mode\n");
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003870 return -EINVAL;
3871 }
3872
NeilBrown72e02072009-12-14 12:49:55 +11003873 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
NeilBrown16f17b32006-06-26 00:27:37 -07003874 return -EINVAL;
NeilBrown16f17b32006-06-26 00:27:37 -07003875 if (msec == 0)
3876 mddev->safemode_delay = 0;
3877 else {
NeilBrown19052c02008-08-05 15:54:13 +10003878 unsigned long old_delay = mddev->safemode_delay;
NeilBrown1b30e662014-12-15 12:57:00 +11003879 unsigned long new_delay = (msec*HZ)/1000;
3880
3881 if (new_delay == 0)
3882 new_delay = 1;
3883 mddev->safemode_delay = new_delay;
3884 if (new_delay < old_delay || old_delay == 0)
3885 mod_timer(&mddev->safemode_timer, jiffies+1);
NeilBrown16f17b32006-06-26 00:27:37 -07003886 }
3887 return len;
3888}
3889static struct md_sysfs_entry md_safe_delay =
NeilBrown80ca3a42006-07-10 04:44:18 -07003890__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
NeilBrown16f17b32006-06-26 00:27:37 -07003891
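/*
 * Example: the rounding done by safe_delay_store() above, as a userspace
 * sketch.  The delay arrives in milliseconds, is converted to jiffies,
 * and any non-zero request is clamped to at least one jiffy.  HZ = 250
 * is an assumption for illustration; real kernels range from 100 to 1000.
 */
#include <stdio.h>

#define DEMO_HZ 250UL

static unsigned long safe_delay_jiffies(unsigned long msec)
{
	unsigned long j = (msec * DEMO_HZ) / 1000;

	return (msec && !j) ? 1 : j;	/* non-zero request => >= 1 jiffy */
}

int main(void)
{
	unsigned long j = safe_delay_jiffies(3);	/* user wrote "0.003" */
	unsigned long msec = (j * 1000) / DEMO_HZ;

	/* what safe_delay_show() would report back: 0.004 */
	printf("3 ms -> %lu jiffy -> %lu.%03lu s\n",
	       j, msec / 1000, msec % 1000);
	return 0;
}
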
3892static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003893level_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08003894{
NeilBrown36d091f2014-12-15 12:56:58 +11003895 struct md_personality *p;
3896 int ret;
3897 spin_lock(&mddev->lock);
3898 p = mddev->pers;
NeilBrownd9d166c2006-01-06 00:20:51 -08003899 if (p)
NeilBrown36d091f2014-12-15 12:56:58 +11003900 ret = sprintf(page, "%s\n", p->name);
NeilBrownd9d166c2006-01-06 00:20:51 -08003901 else if (mddev->clevel[0])
NeilBrown36d091f2014-12-15 12:56:58 +11003902 ret = sprintf(page, "%s\n", mddev->clevel);
NeilBrownd9d166c2006-01-06 00:20:51 -08003903 else if (mddev->level != LEVEL_NONE)
NeilBrown36d091f2014-12-15 12:56:58 +11003904 ret = sprintf(page, "%d\n", mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08003905 else
NeilBrown36d091f2014-12-15 12:56:58 +11003906 ret = 0;
3907 spin_unlock(&mddev->lock);
3908 return ret;
NeilBrowneae17012005-11-08 21:39:23 -08003909}
3910
NeilBrownd9d166c2006-01-06 00:20:51 -08003911static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003912level_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd9d166c2006-01-06 00:20:51 -08003913{
Dan Williamsf2859af2010-05-02 10:04:16 -07003914 char clevel[16];
NeilBrown67918752014-12-15 12:57:01 +11003915 ssize_t rv;
3916 size_t slen = len;
NeilBrowndb721d32014-12-15 12:56:58 +11003917 struct md_personality *pers, *oldpers;
Dan Williamsf2859af2010-05-02 10:04:16 -07003918 long level;
NeilBrowndb721d32014-12-15 12:56:58 +11003919 void *priv, *oldpriv;
NeilBrown3cb03002011-10-11 16:45:26 +11003920 struct md_rdev *rdev;
NeilBrown245f46c2009-03-31 14:39:39 +11003921
NeilBrown67918752014-12-15 12:57:01 +11003922 if (slen == 0 || slen >= sizeof(clevel))
3923 return -EINVAL;
3924
3925 rv = mddev_lock(mddev);
3926 if (rv)
NeilBrown245f46c2009-03-31 14:39:39 +11003927 return rv;
NeilBrown67918752014-12-15 12:57:01 +11003928
3929 if (mddev->pers == NULL) {
3930 strncpy(mddev->clevel, buf, slen);
3931 if (mddev->clevel[slen-1] == '\n')
3932 slen--;
3933 mddev->clevel[slen] = 0;
3934 mddev->level = LEVEL_NONE;
3935 rv = len;
3936 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003937 }
NeilBrown67918752014-12-15 12:57:01 +11003938 rv = -EROFS;
NeilBrownbd8839e2014-05-28 13:39:21 +10003939 if (mddev->ro)
NeilBrown67918752014-12-15 12:57:01 +11003940 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003941
3942 /* request to change the personality. Need to ensure:
3943 * - array is not engaged in resync/recovery/reshape
3944 * - old personality can be suspended
3945 * - new personality can take over the existing array.
3946 */
3947
NeilBrown67918752014-12-15 12:57:01 +11003948 rv = -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10003949 if (mddev->sync_thread ||
NeilBrownf851b602014-12-11 10:02:10 +11003950 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
NeilBrownbb4f1e92010-08-08 21:18:03 +10003951 mddev->reshape_position != MaxSector ||
3952 mddev->sysfs_active)
NeilBrown67918752014-12-15 12:57:01 +11003953 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003954
NeilBrown67918752014-12-15 12:57:01 +11003955 rv = -EINVAL;
NeilBrown245f46c2009-03-31 14:39:39 +11003956 if (!mddev->pers->quiesce) {
NeilBrown9d487392016-11-02 14:16:49 +11003957 pr_warn("md: %s: %s does not support online personality change\n",
3958 mdname(mddev), mddev->pers->name);
NeilBrown67918752014-12-15 12:57:01 +11003959 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003960 }
3961
3962 /* Now find the new personality */
NeilBrown67918752014-12-15 12:57:01 +11003963 strncpy(clevel, buf, slen);
3964 if (clevel[slen-1] == '\n')
3965 slen--;
3966 clevel[slen] = 0;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003967 if (kstrtol(clevel, 10, &level))
Dan Williamsf2859af2010-05-02 10:04:16 -07003968 level = LEVEL_NONE;
NeilBrown245f46c2009-03-31 14:39:39 +11003969
Dan Williamsf2859af2010-05-02 10:04:16 -07003970 if (request_module("md-%s", clevel) != 0)
3971 request_module("md-level-%s", clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003972 spin_lock(&pers_lock);
Dan Williamsf2859af2010-05-02 10:04:16 -07003973 pers = find_pers(level, clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003974 if (!pers || !try_module_get(pers->owner)) {
3975 spin_unlock(&pers_lock);
NeilBrown9d487392016-11-02 14:16:49 +11003976 pr_warn("md: personality %s not loaded\n", clevel);
NeilBrown67918752014-12-15 12:57:01 +11003977 rv = -EINVAL;
3978 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003979 }
3980 spin_unlock(&pers_lock);
3981
3982 if (pers == mddev->pers) {
3983 /* Nothing to do! */
3984 module_put(pers->owner);
NeilBrown67918752014-12-15 12:57:01 +11003985 rv = len;
3986 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003987 }
3988 if (!pers->takeover) {
3989 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11003990 pr_warn("md: %s: %s does not support personality takeover\n",
3991 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11003992 rv = -EINVAL;
3993 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003994 }
3995
NeilBrowndafb20f2012-03-19 12:46:39 +11003996 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01003997 rdev->new_raid_disk = rdev->raid_disk;
3998
NeilBrown245f46c2009-03-31 14:39:39 +11003999 /* ->takeover must set new_* and/or delta_disks
4000 * if it succeeds, and may set them when it fails.
4001 */
4002 priv = pers->takeover(mddev);
4003 if (IS_ERR(priv)) {
4004 mddev->new_level = mddev->level;
4005 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10004006 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown245f46c2009-03-31 14:39:39 +11004007 mddev->raid_disks -= mddev->delta_disks;
4008 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10004009 mddev->reshape_backwards = 0;
NeilBrown245f46c2009-03-31 14:39:39 +11004010 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004011 pr_warn("md: %s: %s would not accept array\n",
4012 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004013 rv = PTR_ERR(priv);
4014 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004015 }
4016
4017 /* Looks like we have a winner */
4018 mddev_suspend(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11004019 mddev_detach(mddev);
NeilBrown36d091f2014-12-15 12:56:58 +11004020
4021 spin_lock(&mddev->lock);
NeilBrowndb721d32014-12-15 12:56:58 +11004022 oldpers = mddev->pers;
4023 oldpriv = mddev->private;
4024 mddev->pers = pers;
4025 mddev->private = priv;
4026 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4027 mddev->level = mddev->new_level;
4028 mddev->layout = mddev->new_layout;
4029 mddev->chunk_sectors = mddev->new_chunk_sectors;
4030 mddev->delta_disks = 0;
4031 mddev->reshape_backwards = 0;
4032 mddev->degraded = 0;
NeilBrown36d091f2014-12-15 12:56:58 +11004033 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10004034
NeilBrowndb721d32014-12-15 12:56:58 +11004035 if (oldpers->sync_request == NULL &&
Trela Maciej54071b32010-03-08 16:02:42 +11004036 mddev->external) {
4037 /* We are converting from a no-redundancy array
4038 * to a redundancy array and metadata is managed
4039 * externally so we need to be sure that writes
4040 * won't block due to a need to transition
4041 * clean->dirty
4042 * until external management is started.
4043 */
4044 mddev->in_sync = 0;
4045 mddev->safemode_delay = 0;
4046 mddev->safemode = 0;
4047 }
4048
NeilBrowndb721d32014-12-15 12:56:58 +11004049 oldpers->free(mddev, oldpriv);
4050
4051 if (oldpers->sync_request == NULL &&
4052 pers->sync_request != NULL) {
4053 /* need to add the md_redundancy_group */
4054 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11004055 pr_warn("md: cannot register extra attributes for %s\n",
4056 mdname(mddev));
NeilBrowndb721d32014-12-15 12:56:58 +11004057 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07004058 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4059 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrowndb721d32014-12-15 12:56:58 +11004060 }
4061 if (oldpers->sync_request != NULL &&
4062 pers->sync_request == NULL) {
4063 /* need to remove the md_redundancy_group */
4064 if (mddev->to_remove == NULL)
4065 mddev->to_remove = &md_redundancy_group;
4066 }
4067
Alexey Obitotskiy4cb9da72016-06-23 12:11:01 +02004068 module_put(oldpers->owner);
4069
NeilBrowndafb20f2012-03-19 12:46:39 +11004070 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004071 if (rdev->raid_disk < 0)
4072 continue;
NeilBrownbf2cb0d2011-01-14 09:14:34 +11004073 if (rdev->new_raid_disk >= mddev->raid_disks)
NeilBrowne93f68a2010-06-15 09:36:03 +01004074 rdev->new_raid_disk = -1;
4075 if (rdev->new_raid_disk == rdev->raid_disk)
4076 continue;
Namhyung Kim36fad852011-07-27 11:00:36 +10004077 sysfs_unlink_rdev(mddev, rdev);
NeilBrowne93f68a2010-06-15 09:36:03 +01004078 }
NeilBrowndafb20f2012-03-19 12:46:39 +11004079 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004080 if (rdev->raid_disk < 0)
4081 continue;
4082 if (rdev->new_raid_disk == rdev->raid_disk)
4083 continue;
4084 rdev->raid_disk = rdev->new_raid_disk;
4085 if (rdev->raid_disk < 0)
NeilBrown3a981b02009-08-03 10:59:55 +10004086 clear_bit(In_sync, &rdev->flags);
NeilBrowne93f68a2010-06-15 09:36:03 +01004087 else {
Namhyung Kim36fad852011-07-27 11:00:36 +10004088 if (sysfs_link_rdev(mddev, rdev))
NeilBrown9d487392016-11-02 14:16:49 +11004089 pr_warn("md: cannot register rd%d for %s after level change\n",
4090 rdev->raid_disk, mdname(mddev));
NeilBrown3a981b02009-08-03 10:59:55 +10004091 }
NeilBrowne93f68a2010-06-15 09:36:03 +01004092 }
4093
NeilBrowndb721d32014-12-15 12:56:58 +11004094 if (pers->sync_request == NULL) {
Trela, Maciej9af204c2010-03-08 16:02:44 +11004095 /* this is now an array without redundancy, so
4096 * it must always be in_sync
4097 */
4098 mddev->in_sync = 1;
4099 del_timer_sync(&mddev->safemode_timer);
4100 }
NeilBrown02e5f5c2013-11-14 15:16:15 +11004101 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown245f46c2009-03-31 14:39:39 +11004102 pers->run(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004103 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassow47525e52012-05-22 13:55:29 +10004104 mddev_resume(mddev);
NeilBrown830778a2014-01-14 15:17:03 +11004105 if (!mddev->thread)
4106 md_update_sb(mddev, 1);
Junxiao Bie1a86db2020-07-14 16:10:26 -07004107 sysfs_notify_dirent_safe(mddev->sysfs_level);
Guoqing Jiang54679482021-10-04 23:34:53 +08004108 md_new_event();
NeilBrown67918752014-12-15 12:57:01 +11004109 rv = len;
4110out_unlock:
4111 mddev_unlock(mddev);
NeilBrownd9d166c2006-01-06 00:20:51 -08004112 return rv;
4113}
4114
4115static struct md_sysfs_entry md_level =
NeilBrown80ca3a42006-07-10 04:44:18 -07004116__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
NeilBrowneae17012005-11-08 21:39:23 -08004117
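/*
 * Example: requesting a personality takeover from userspace, as a sketch.
 * Writing a level name to the md "level" attribute drives level_store()
 * above; reading it back confirms the change.  The array name and target
 * level are illustrative, and the write fails (EBUSY, EINVAL, ...) if the
 * array is resyncing or reshaping or the personalities lack takeover.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd = open("/sys/block/md0/md/level", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "raid5\n", 6) < 0)
		perror("write level");
	if (lseek(fd, 0, SEEK_SET) == 0 &&
	    (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		printf("level is now: %s", buf);	/* e.g. "raid5\n" */
	}
	close(fd);
	return 0;
}
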
NeilBrownd4dbd022006-06-26 00:27:59 -07004118static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004119layout_show(struct mddev *mddev, char *page)
NeilBrownd4dbd022006-06-26 00:27:59 -07004120{
4121 /* just a number, not meaningful for all levels */
NeilBrown08a02ec2007-05-09 02:35:38 -07004122 if (mddev->reshape_position != MaxSector &&
4123 mddev->layout != mddev->new_layout)
4124 return sprintf(page, "%d (%d)\n",
4125 mddev->new_layout, mddev->layout);
NeilBrownd4dbd022006-06-26 00:27:59 -07004126 return sprintf(page, "%d\n", mddev->layout);
4127}
4128
4129static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004130layout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd4dbd022006-06-26 00:27:59 -07004131{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004132 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004133 int err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004134
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004135 err = kstrtouint(buf, 10, &n);
4136 if (err < 0)
4137 return err;
NeilBrown67918752014-12-15 12:57:01 +11004138 err = mddev_lock(mddev);
4139 if (err)
4140 return err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004141
NeilBrownb3546032009-03-31 14:56:41 +11004142 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004143 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004144 err = -EBUSY;
4145 else if (mddev->ro)
4146 err = -EROFS;
4147 else {
4148 mddev->new_layout = n;
4149 err = mddev->pers->check_reshape(mddev);
4150 if (err)
4151 mddev->new_layout = mddev->layout;
NeilBrown597a7112009-06-18 08:47:42 +10004152 }
NeilBrownb3546032009-03-31 14:56:41 +11004153 } else {
NeilBrown08a02ec2007-05-09 02:35:38 -07004154 mddev->new_layout = n;
NeilBrownb3546032009-03-31 14:56:41 +11004155 if (mddev->reshape_position == MaxSector)
4156 mddev->layout = n;
4157 }
NeilBrown67918752014-12-15 12:57:01 +11004158 mddev_unlock(mddev);
4159 return err ?: len;
NeilBrownd4dbd022006-06-26 00:27:59 -07004160}
4161static struct md_sysfs_entry md_layout =
NeilBrown80ca3a42006-07-10 04:44:18 -07004162__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
NeilBrownd4dbd022006-06-26 00:27:59 -07004163
NeilBrowneae17012005-11-08 21:39:23 -08004164static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004165raid_disks_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08004166{
NeilBrownbb636542005-11-08 21:39:45 -08004167 if (mddev->raid_disks == 0)
4168 return 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004169 if (mddev->reshape_position != MaxSector &&
4170 mddev->delta_disks != 0)
4171 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4172 mddev->raid_disks - mddev->delta_disks);
NeilBrowneae17012005-11-08 21:39:23 -08004173 return sprintf(page, "%d\n", mddev->raid_disks);
4174}
4175
NeilBrownfd01b882011-10-11 16:47:53 +11004176static int update_raid_disks(struct mddev *mddev, int raid_disks);
NeilBrownda943b992006-01-06 00:20:54 -08004177
4178static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004179raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownda943b992006-01-06 00:20:54 -08004180{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004181 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004182 int err;
NeilBrownda943b992006-01-06 00:20:54 -08004183
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004184 err = kstrtouint(buf, 10, &n);
4185 if (err < 0)
4186 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004187
NeilBrown67918752014-12-15 12:57:01 +11004188 err = mddev_lock(mddev);
4189 if (err)
4190 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004191 if (mddev->pers)
NeilBrown67918752014-12-15 12:57:01 +11004192 err = update_raid_disks(mddev, n);
NeilBrown08a02ec2007-05-09 02:35:38 -07004193 else if (mddev->reshape_position != MaxSector) {
NeilBrownc6563a82012-05-21 09:27:00 +10004194 struct md_rdev *rdev;
NeilBrown08a02ec2007-05-09 02:35:38 -07004195 int olddisks = mddev->raid_disks - mddev->delta_disks;
NeilBrownc6563a82012-05-21 09:27:00 +10004196
NeilBrown67918752014-12-15 12:57:01 +11004197 err = -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10004198 rdev_for_each(rdev, mddev) {
4199 if (olddisks < n &&
4200 rdev->data_offset < rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004201 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004202 if (olddisks > n &&
4203 rdev->data_offset > rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004204 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004205 }
NeilBrown67918752014-12-15 12:57:01 +11004206 err = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004207 mddev->delta_disks = n - olddisks;
4208 mddev->raid_disks = n;
NeilBrown2c810cd2012-05-21 09:27:00 +10004209 mddev->reshape_backwards = (mddev->delta_disks < 0);
NeilBrown08a02ec2007-05-09 02:35:38 -07004210 } else
NeilBrownda943b992006-01-06 00:20:54 -08004211 mddev->raid_disks = n;
NeilBrown67918752014-12-15 12:57:01 +11004212out_unlock:
4213 mddev_unlock(mddev);
4214 return err ? err : len;
NeilBrownda943b992006-01-06 00:20:54 -08004215}
4216static struct md_sysfs_entry md_raid_disks =
NeilBrown80ca3a42006-07-10 04:44:18 -07004217__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
NeilBrowneae17012005-11-08 21:39:23 -08004218
NeilBrown24dd4692005-11-08 21:39:26 -08004219static ssize_t
Sebastian Parschauerec164d072020-07-28 12:01:39 +02004220uuid_show(struct mddev *mddev, char *page)
4221{
4222 return sprintf(page, "%pU\n", mddev->uuid);
4223}
4224static struct md_sysfs_entry md_uuid =
4225__ATTR(uuid, S_IRUGO, uuid_show, NULL);
4226
4227static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004228chunk_size_show(struct mddev *mddev, char *page)
NeilBrown3b343802006-01-06 00:20:47 -08004229{
NeilBrown08a02ec2007-05-09 02:35:38 -07004230 if (mddev->reshape_position != MaxSector &&
Andre Noll664e7c42009-06-18 08:45:27 +10004231 mddev->chunk_sectors != mddev->new_chunk_sectors)
4232 return sprintf(page, "%d (%d)\n",
4233 mddev->new_chunk_sectors << 9,
Andre Noll9d8f0362009-06-18 08:45:01 +10004234 mddev->chunk_sectors << 9);
4235 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
NeilBrown3b343802006-01-06 00:20:47 -08004236}
4237
4238static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004239chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown3b343802006-01-06 00:20:47 -08004240{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004241 unsigned long n;
NeilBrown67918752014-12-15 12:57:01 +11004242 int err;
NeilBrown3b343802006-01-06 00:20:47 -08004243
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004244 err = kstrtoul(buf, 10, &n);
4245 if (err < 0)
4246 return err;
NeilBrown3b343802006-01-06 00:20:47 -08004247
NeilBrown67918752014-12-15 12:57:01 +11004248 err = mddev_lock(mddev);
4249 if (err)
4250 return err;
NeilBrownb3546032009-03-31 14:56:41 +11004251 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004252 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004253 err = -EBUSY;
4254 else if (mddev->ro)
4255 err = -EROFS;
4256 else {
4257 mddev->new_chunk_sectors = n >> 9;
4258 err = mddev->pers->check_reshape(mddev);
4259 if (err)
4260 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown597a7112009-06-18 08:47:42 +10004261 }
NeilBrownb3546032009-03-31 14:56:41 +11004262 } else {
Andre Noll664e7c42009-06-18 08:45:27 +10004263 mddev->new_chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004264 if (mddev->reshape_position == MaxSector)
Andre Noll9d8f0362009-06-18 08:45:01 +10004265 mddev->chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004266 }
NeilBrown67918752014-12-15 12:57:01 +11004267 mddev_unlock(mddev);
4268 return err ?: len;
NeilBrown3b343802006-01-06 00:20:47 -08004269}
4270static struct md_sysfs_entry md_chunk_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004271__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
NeilBrown3b343802006-01-06 00:20:47 -08004272
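/*
 * Example: the unit handling of chunk_size_store()/chunk_size_show() as a
 * userspace sketch.  Userspace writes a byte count, md keeps 512-byte
 * sectors internally (n >> 9), and mid-reshape the show side prints both
 * values as "new (old)".  The sizes below are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned long bytes = 524288;		/* 512 KiB chunk requested */
	unsigned long new_sectors = bytes >> 9;	/* 1024 sectors stored */
	unsigned long old_sectors = 128;	/* 64 KiB, pre-reshape */

	/* what chunk_size_show() prints while a reshape is pending: */
	printf("%lu (%lu)\n", new_sectors << 9, old_sectors << 9);
	return 0;
}
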
NeilBrowna94213b2006-06-26 00:28:00 -07004273static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004274resync_start_show(struct mddev *mddev, char *page)
NeilBrowna94213b2006-06-26 00:28:00 -07004275{
NeilBrownd1a7c502009-03-31 15:24:32 +11004276 if (mddev->recovery_cp == MaxSector)
4277 return sprintf(page, "none\n");
NeilBrowna94213b2006-06-26 00:28:00 -07004278 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4279}
4280
4281static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004282resync_start_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna94213b2006-06-26 00:28:00 -07004283{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004284 unsigned long long n;
NeilBrown67918752014-12-15 12:57:01 +11004285 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004286
4287 if (cmd_match(buf, "none"))
4288 n = MaxSector;
4289 else {
4290 err = kstrtoull(buf, 10, &n);
4291 if (err < 0)
4292 return err;
4293 if (n != (sector_t)n)
4294 return -EINVAL;
4295 }
NeilBrowna94213b2006-06-26 00:28:00 -07004296
NeilBrown67918752014-12-15 12:57:01 +11004297 err = mddev_lock(mddev);
4298 if (err)
4299 return err;
NeilBrownb0986362011-05-11 15:52:21 +10004300 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrown67918752014-12-15 12:57:01 +11004301 err = -EBUSY;
NeilBrowna94213b2006-06-26 00:28:00 -07004302
NeilBrown67918752014-12-15 12:57:01 +11004303 if (!err) {
4304 mddev->recovery_cp = n;
4305 if (mddev->pers)
Shaohua Li29530792016-12-08 15:48:19 -08004306 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
NeilBrown67918752014-12-15 12:57:01 +11004307 }
4308 mddev_unlock(mddev);
4309 return err ?: len;
NeilBrowna94213b2006-06-26 00:28:00 -07004310}
4311static struct md_sysfs_entry md_resync_start =
NeilBrown750f1992014-09-30 08:53:05 +10004312__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4313 resync_start_show, resync_start_store);
NeilBrowna94213b2006-06-26 00:28:00 -07004314
NeilBrown9e653b62006-06-26 00:27:58 -07004315/*
4316 * The array state can be:
4317 *
4318 * clear
4319 * No devices, no size, no level
4320 * Equivalent to STOP_ARRAY ioctl
4321 * inactive
4322 * May have some settings, but array is not active
4323 * all IO results in error
4324 * When written, doesn't tear down array, but just stops it
4325 * suspended (not supported yet)
4326 * All IO requests will block. The array can be reconfigured.
Andre Noll910d8cb2008-03-25 21:00:53 +01004327 * Writing this, if accepted, will block until array is quiescent
NeilBrown9e653b62006-06-26 00:27:58 -07004328 * readonly
4329 * no resync can happen. no superblocks get written.
4330 * write requests fail
4331 * read-auto
4332 * like readonly, but behaves like 'clean' on a write request.
4333 *
4334 * clean - no pending writes, but otherwise active.
4335 * When written to inactive array, starts without resync
4336 * If a write request arrives then
4337 * if metadata is known, mark 'dirty' and switch to 'active'.
4338 * if not known, block and switch to write-pending
4339 * If written to an active array that has pending writes, then fails.
4340 * active
4341 * fully active: IO and resync can be happening.
4342 * When written to inactive array, starts with resync
4343 *
4344 * write-pending
4345 * clean, but writes are blocked waiting for 'active' to be written.
4346 *
4347 * active-idle
4348 * like active, but no writes have been seen for a while (100msec).
4349 *
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004350 * broken
4351 * RAID0/LINEAR-only: same as clean, but array is missing a member.
4352 * It's useful because mounted RAID0/LINEAR arrays aren't stopped
4353 * when a member is gone, so this state will at least alert the
4354 * user that something is wrong.
NeilBrown9e653b62006-06-26 00:27:58 -07004355 */
4356enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004357 write_pending, active_idle, broken, bad_word};
Adrian Bunk05381952006-06-26 00:28:01 -07004358static char *array_states[] = {
NeilBrown9e653b62006-06-26 00:27:58 -07004359 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004360 "write-pending", "active-idle", "broken", NULL };
NeilBrown9e653b62006-06-26 00:27:58 -07004361
4362static int match_word(const char *word, char **list)
4363{
4364 int n;
4365 for (n=0; list[n]; n++)
4366 if (cmd_match(word, list[n]))
4367 break;
4368 return n;
4369}
4370
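/*
 * Example: a userspace sketch of how a written word is mapped onto
 * array_states[] by match_word() above.  The matcher mimics cmd_match(),
 * which tolerates the trailing newline that echo(1) appends; a word that
 * matches nothing runs off the table and yields the bad_word index.
 */
#include <stdio.h>
#include <string.h>

static const char *demo_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean",
	"active", "write-pending", "active-idle", "broken", NULL
};

static int demo_cmd_match(const char *cmd, const char *str)
{
	size_t n = strlen(str);

	return strncmp(cmd, str, n) == 0 &&
	       (cmd[n] == '\0' || (cmd[n] == '\n' && cmd[n + 1] == '\0'));
}

static int demo_match_word(const char *word)
{
	int n;

	for (n = 0; demo_states[n]; n++)
		if (demo_cmd_match(word, demo_states[n]))
			break;
	return n;
}

int main(void)
{
	printf("%d\n", demo_match_word("clean\n"));	/* 5 == clean */
	printf("%d\n", demo_match_word("bogus"));	/* 10 == bad_word */
	return 0;
}
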
4371static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004372array_state_show(struct mddev *mddev, char *page)
NeilBrown9e653b62006-06-26 00:27:58 -07004373{
4374 enum array_state st = inactive;
4375
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004376 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
NeilBrown9e653b62006-06-26 00:27:58 -07004377 switch(mddev->ro) {
4378 case 1:
4379 st = readonly;
4380 break;
4381 case 2:
4382 st = read_auto;
4383 break;
4384 case 0:
NeilBrown55cc39f2017-03-15 14:05:14 +11004385 spin_lock(&mddev->lock);
Shaohua Li29530792016-12-08 15:48:19 -08004386 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrowne6910632008-02-06 01:39:51 -08004387 st = write_pending;
Tomasz Majchrzak16f88942016-10-24 12:47:28 +02004388 else if (mddev->in_sync)
4389 st = clean;
NeilBrown9e653b62006-06-26 00:27:58 -07004390 else if (mddev->safemode)
4391 st = active_idle;
4392 else
4393 st = active;
NeilBrown55cc39f2017-03-15 14:05:14 +11004394 spin_unlock(&mddev->lock);
NeilBrown9e653b62006-06-26 00:27:58 -07004395 }
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004396
4397 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4398 st = broken;
4399 } else {
NeilBrown9e653b62006-06-26 00:27:58 -07004400 if (list_empty(&mddev->disks) &&
4401 mddev->raid_disks == 0 &&
Andre Noll58c0fed2009-03-31 14:33:13 +11004402 mddev->dev_sectors == 0)
NeilBrown9e653b62006-06-26 00:27:58 -07004403 st = clear;
4404 else
4405 st = inactive;
4406 }
4407 return sprintf(page, "%s\n", array_states[st]);
4408}
4409
NeilBrownf72ffdd2014-09-30 14:23:59 +10004410static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4411static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
NeilBrownfd01b882011-10-11 16:47:53 +11004412static int restart_array(struct mddev *mddev);
NeilBrown9e653b62006-06-26 00:27:58 -07004413
4414static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004415array_state_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown9e653b62006-06-26 00:27:58 -07004416{
NeilBrown6497709b2017-03-15 14:05:14 +11004417 int err = 0;
NeilBrown9e653b62006-06-26 00:27:58 -07004418 enum array_state st = match_word(buf, array_states);
NeilBrown67918752014-12-15 12:57:01 +11004419
4420 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4421 /* don't take reconfig_mutex when toggling between
4422 * clean and active
4423 */
4424 spin_lock(&mddev->lock);
4425 if (st == active) {
4426 restart_array(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004427 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
Tomasz Majchrzak91a6c4a2016-10-25 17:07:08 +02004428 md_wakeup_thread(mddev->thread);
NeilBrown67918752014-12-15 12:57:01 +11004429 wake_up(&mddev->sb_wait);
NeilBrown67918752014-12-15 12:57:01 +11004430 } else /* st == clean */ {
4431 restart_array(mddev);
NeilBrown6497709b2017-03-15 14:05:14 +11004432 if (!set_in_sync(mddev))
NeilBrown67918752014-12-15 12:57:01 +11004433 err = -EBUSY;
4434 }
Tomasz Majchrzak573275b2016-06-30 10:47:09 +02004435 if (!err)
4436 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown67918752014-12-15 12:57:01 +11004437 spin_unlock(&mddev->lock);
NeilBrownc008f1d2015-06-12 19:46:44 +10004438 return err ?: len;
NeilBrown67918752014-12-15 12:57:01 +11004439 }
4440 err = mddev_lock(mddev);
4441 if (err)
4442 return err;
4443 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004444 switch(st) {
4445 case bad_word:
4446 break;
4447 case clear:
4448 /* stopping an active array */
NeilBrowna05b7ea2012-07-19 15:59:18 +10004449 err = do_md_stop(mddev, 0, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004450 break;
4451 case inactive:
4452 /* stopping an active array */
NeilBrown90cf1952012-07-31 10:04:55 +10004453 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004454 err = do_md_stop(mddev, 2, NULL);
NeilBrown90cf1952012-07-31 10:04:55 +10004455 else
NeilBrowne6910632008-02-06 01:39:51 -08004456 err = 0; /* already inactive */
NeilBrown9e653b62006-06-26 00:27:58 -07004457 break;
4458 case suspended:
4459 break; /* not supported yet */
4460 case readonly:
4461 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004462 err = md_set_readonly(mddev, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004463 else {
4464 mddev->ro = 1;
NeilBrown648b6292008-04-30 00:52:30 -07004465 set_disk_ro(mddev->gendisk, 1);
NeilBrown9e653b62006-06-26 00:27:58 -07004466 err = do_md_run(mddev);
4467 }
4468 break;
4469 case read_auto:
NeilBrown9e653b62006-06-26 00:27:58 -07004470 if (mddev->pers) {
NeilBrown80268ee2008-10-13 11:55:12 +11004471 if (mddev->ro == 0)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004472 err = md_set_readonly(mddev, NULL);
NeilBrown80268ee2008-10-13 11:55:12 +11004473 else if (mddev->ro == 1)
NeilBrown648b6292008-04-30 00:52:30 -07004474 err = restart_array(mddev);
4475 if (err == 0) {
4476 mddev->ro = 2;
4477 set_disk_ro(mddev->gendisk, 0);
4478 }
NeilBrown9e653b62006-06-26 00:27:58 -07004479 } else {
4480 mddev->ro = 2;
4481 err = do_md_run(mddev);
4482 }
4483 break;
4484 case clean:
4485 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004486 err = restart_array(mddev);
4487 if (err)
4488 break;
NeilBrown85572d72014-12-15 12:56:56 +11004489 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11004490 if (!set_in_sync(mddev))
NeilBrowne6910632008-02-06 01:39:51 -08004491 err = -EBUSY;
NeilBrown85572d72014-12-15 12:56:56 +11004492 spin_unlock(&mddev->lock);
NeilBrown5bf29592009-05-07 12:50:57 +10004493 } else
4494 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004495 break;
4496 case active:
4497 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004498 err = restart_array(mddev);
4499 if (err)
4500 break;
Shaohua Li29530792016-12-08 15:48:19 -08004501 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown9e653b62006-06-26 00:27:58 -07004502 wake_up(&mddev->sb_wait);
4503 err = 0;
4504 } else {
4505 mddev->ro = 0;
NeilBrown648b6292008-04-30 00:52:30 -07004506 set_disk_ro(mddev->gendisk, 0);
NeilBrown9e653b62006-06-26 00:27:58 -07004507 err = do_md_run(mddev);
4508 }
4509 break;
4510 case write_pending:
4511 case active_idle:
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004512 case broken:
NeilBrown9e653b62006-06-26 00:27:58 -07004513 /* these cannot be set */
4514 break;
4515 }
NeilBrown67918752014-12-15 12:57:01 +11004516
4517 if (!err) {
NeilBrown1d23f172011-12-08 15:49:12 +11004518 if (mddev->hold_active == UNTIL_IOCTL)
4519 mddev->hold_active = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10004520 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10004521 }
NeilBrown67918752014-12-15 12:57:01 +11004522 mddev_unlock(mddev);
4523 return err ?: len;
NeilBrown9e653b62006-06-26 00:27:58 -07004524}
NeilBrown80ca3a42006-07-10 04:44:18 -07004525static struct md_sysfs_entry md_array_state =
NeilBrown750f1992014-09-30 08:53:05 +10004526__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
NeilBrown9e653b62006-06-26 00:27:58 -07004527
NeilBrown6d7ff7382006-01-06 00:21:16 -08004528static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004529max_corrected_read_errors_show(struct mddev *mddev, char *page) {
Robert Becker1e509152009-12-14 12:49:58 +11004530 return sprintf(page, "%d\n",
4531 atomic_read(&mddev->max_corr_read_errors));
4532}
4533
4534static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004535max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
Robert Becker1e509152009-12-14 12:49:58 +11004536{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004537 unsigned int n;
4538 int rv;
Robert Becker1e509152009-12-14 12:49:58 +11004539
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004540 rv = kstrtouint(buf, 10, &n);
4541 if (rv < 0)
4542 return rv;
4543 atomic_set(&mddev->max_corr_read_errors, n);
4544 return len;
Robert Becker1e509152009-12-14 12:49:58 +11004545}
4546
4547static struct md_sysfs_entry max_corr_read_errors =
4548__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4549 max_corrected_read_errors_store);
4550
4551static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004552null_show(struct mddev *mddev, char *page)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004553{
4554 return -EINVAL;
4555}
4556
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004557/* need to ensure rdev_delayed_delete() has completed */
4558static void flush_rdev_wq(struct mddev *mddev)
4559{
4560 struct md_rdev *rdev;
4561
4562 rcu_read_lock();
4563 rdev_for_each_rcu(rdev, mddev)
4564 if (work_pending(&rdev->del_work)) {
4565 flush_workqueue(md_rdev_misc_wq);
4566 break;
4567 }
4568 rcu_read_unlock();
4569}
4570
NeilBrown6d7ff7382006-01-06 00:21:16 -08004571static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004572new_dev_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004573{
4574 /* buf must be "%d:%d" with an optional trailing newline, giving major and minor numbers */
4575 /* The new device is added to the array.
4576 * If the array has a persistent superblock, we read the
4577 * superblock to initialise info and check validity.
4578 * Otherwise, the only checking done is that in bind_rdev_to_array,
4579 * which mainly checks size.
4580 */
4581 char *e;
4582 int major = simple_strtoul(buf, &e, 10);
4583 int minor;
4584 dev_t dev;
NeilBrown3cb03002011-10-11 16:45:26 +11004585 struct md_rdev *rdev;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004586 int err;
4587
4588 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4589 return -EINVAL;
4590 minor = simple_strtoul(e+1, &e, 10);
4591 if (*e && *e != '\n')
4592 return -EINVAL;
4593 dev = MKDEV(major, minor);
4594 if (major != MAJOR(dev) ||
4595 minor != MINOR(dev))
4596 return -EOVERFLOW;
4597
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004598 flush_rdev_wq(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004599 err = mddev_lock(mddev);
4600 if (err)
4601 return err;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004602 if (mddev->persistent) {
4603 rdev = md_import_device(dev, mddev->major_version,
4604 mddev->minor_version);
4605 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11004606 struct md_rdev *rdev0
4607 = list_entry(mddev->disks.next,
4608 struct md_rdev, same_set);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004609 err = super_types[mddev->major_version]
4610 .load_super(rdev, rdev0, mddev->minor_version);
4611 if (err < 0)
4612 goto out;
4613 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08004614 } else if (mddev->external)
4615 rdev = md_import_device(dev, -2, -1);
4616 else
NeilBrown6d7ff7382006-01-06 00:21:16 -08004617 rdev = md_import_device(dev, -1, -1);
4618
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004619 if (IS_ERR(rdev)) {
4620 mddev_unlock(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004621 return PTR_ERR(rdev);
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004622 }
NeilBrown6d7ff7382006-01-06 00:21:16 -08004623 err = bind_rdev_to_array(rdev, mddev);
4624 out:
4625 if (err)
4626 export_rdev(rdev);
NeilBrown67918752014-12-15 12:57:01 +11004627 mddev_unlock(mddev);
Alexey Obitotskiy5492c462017-07-28 15:49:25 +02004628 if (!err)
Guoqing Jiang54679482021-10-04 23:34:53 +08004629 md_new_event();
NeilBrown6d7ff7382006-01-06 00:21:16 -08004630 return err ? err : len;
4631}
4632
4633static struct md_sysfs_entry md_new_device =
NeilBrown80ca3a42006-07-10 04:44:18 -07004634__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
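/*
 * Hot-adding through new_dev, sketched from userspace (assumptions: an
 * array md0 and a component whose dev_t is 8:16, i.e. /dev/sdb). The
 * "%d:%d" string is parsed by new_dev_store() above and handed to
 * md_import_device()/bind_rdev_to_array().
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/md0/md/new_dev", "w");

	if (!f)
		return 1;
	fprintf(f, "8:16\n");	/* MKDEV(8, 16) */
	return fclose(f) ? 1 : 0;
}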
NeilBrown3b343802006-01-06 00:20:47 -08004635
4636static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004637bitmap_store(struct mddev *mddev, const char *buf, size_t len)
Paul Clements9b1d1da2006-10-03 01:15:49 -07004638{
4639 char *end;
4640 unsigned long chunk, end_chunk;
NeilBrown67918752014-12-15 12:57:01 +11004641 int err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004642
NeilBrown67918752014-12-15 12:57:01 +11004643 err = mddev_lock(mddev);
4644 if (err)
4645 return err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004646 if (!mddev->bitmap)
4647 goto out;
4648 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4649 while (*buf) {
4650 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4651 if (buf == end) break;
4652 if (*end == '-') { /* range */
4653 buf = end + 1;
4654 end_chunk = simple_strtoul(buf, &end, 0);
4655 if (buf == end) break;
4656 }
4657 if (*end && !isspace(*end)) break;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004658 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
André Goddard Rosae7d28602009-12-14 18:01:06 -08004659 buf = skip_spaces(end);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004660 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004661 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
Paul Clements9b1d1da2006-10-03 01:15:49 -07004662out:
NeilBrown67918752014-12-15 12:57:01 +11004663 mddev_unlock(mddev);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004664 return len;
4665}
4666
4667static struct md_sysfs_entry md_bitmap =
4668__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
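/*
 * For reference, the token format bitmap_store() accepts (chunk numbers
 * below are examples only): single chunks or inclusive ranges, whitespace
 * separated, e.g. "0 5 7\n" or "0-31 64-95\n"; each token is passed to
 * md_bitmap_dirty_bits().
 */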
4669
4670static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004671size_show(struct mddev *mddev, char *page)
NeilBrowna35b0d62006-01-06 00:20:49 -08004672{
Andre Noll58c0fed2009-03-31 14:33:13 +11004673 return sprintf(page, "%llu\n",
4674 (unsigned long long)mddev->dev_sectors / 2);
NeilBrowna35b0d62006-01-06 00:20:49 -08004675}
4676
NeilBrownfd01b882011-10-11 16:47:53 +11004677static int update_size(struct mddev *mddev, sector_t num_sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004678
4679static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004680size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna35b0d62006-01-06 00:20:49 -08004681{
4682 /* If array is inactive, we can reduce the component size, but
4683 * not increase it (except from 0).
4684 * If array is active, we can try an on-line resize
4685 */
Dan Williamsb522adc2009-03-31 15:00:31 +11004686 sector_t sectors;
4687 int err = strict_blocks_to_sectors(buf, &sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004688
Andre Noll58c0fed2009-03-31 14:33:13 +11004689 if (err < 0)
4690 return err;
NeilBrown67918752014-12-15 12:57:01 +11004691 err = mddev_lock(mddev);
4692 if (err)
4693 return err;
NeilBrowna35b0d62006-01-06 00:20:49 -08004694 if (mddev->pers) {
Andre Noll58c0fed2009-03-31 14:33:13 +11004695 err = update_size(mddev, sectors);
Xiao Ni4ba1e782016-06-12 17:18:00 +08004696 if (err == 0)
4697 md_update_sb(mddev, 1);
NeilBrowna35b0d62006-01-06 00:20:49 -08004698 } else {
Andre Noll58c0fed2009-03-31 14:33:13 +11004699 if (mddev->dev_sectors == 0 ||
4700 mddev->dev_sectors > sectors)
4701 mddev->dev_sectors = sectors;
NeilBrowna35b0d62006-01-06 00:20:49 -08004702 else
4703 err = -ENOSPC;
4704 }
NeilBrown67918752014-12-15 12:57:01 +11004705 mddev_unlock(mddev);
NeilBrowna35b0d62006-01-06 00:20:49 -08004706 return err ? err : len;
4707}
4708
4709static struct md_sysfs_entry md_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004710__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
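/*
 * component_size is shown and parsed in 1K blocks, hence the "/ 2" of
 * dev_sectors (512-byte sectors) in size_show(); a 1 TiB component, for
 * example, reads back as 1073741824.
 */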
NeilBrowna35b0d62006-01-06 00:20:49 -08004711
Masanari Iida83f0d772012-10-30 00:18:08 +09004712/* Metadata version.
NeilBrowne6910632008-02-06 01:39:51 -08004713 * This is one of
4714 * 'none' for arrays with no metadata (good luck...)
4715 * 'external' for arrays with externally managed metadata,
NeilBrown8bb93aa2006-01-06 00:20:50 -08004716 * or N.M for internally known formats
4717 */
4718static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004719metadata_show(struct mddev *mddev, char *page)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004720{
4721 if (mddev->persistent)
4722 return sprintf(page, "%d.%d\n",
4723 mddev->major_version, mddev->minor_version);
NeilBrowne6910632008-02-06 01:39:51 -08004724 else if (mddev->external)
4725 return sprintf(page, "external:%s\n", mddev->metadata_type);
NeilBrown8bb93aa2006-01-06 00:20:50 -08004726 else
4727 return sprintf(page, "none\n");
4728}
4729
4730static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004731metadata_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004732{
4733 int major, minor;
4734 char *e;
NeilBrown67918752014-12-15 12:57:01 +11004735 int err;
NeilBrownea43ddd2008-10-13 11:55:11 +11004736 /* Changing the details of 'external' metadata is
4737 * always permitted. Otherwise there must be
4738 * no devices attached to the array.
4739 */
NeilBrown67918752014-12-15 12:57:01 +11004740
4741 err = mddev_lock(mddev);
4742 if (err)
4743 return err;
4744 err = -EBUSY;
NeilBrownea43ddd2008-10-13 11:55:11 +11004745 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4746 ;
4747 else if (!list_empty(&mddev->disks))
NeilBrown67918752014-12-15 12:57:01 +11004748 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004749
NeilBrown67918752014-12-15 12:57:01 +11004750 err = 0;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004751 if (cmd_match(buf, "none")) {
4752 mddev->persistent = 0;
NeilBrowne6910632008-02-06 01:39:51 -08004753 mddev->external = 0;
4754 mddev->major_version = 0;
4755 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004756 goto out_unlock;
NeilBrowne6910632008-02-06 01:39:51 -08004757 }
4758 if (strncmp(buf, "external:", 9) == 0) {
NeilBrown20a49ff2008-02-06 01:39:57 -08004759 size_t namelen = len-9;
NeilBrowne6910632008-02-06 01:39:51 -08004760 if (namelen >= sizeof(mddev->metadata_type))
4761 namelen = sizeof(mddev->metadata_type)-1;
4762 strncpy(mddev->metadata_type, buf+9, namelen);
4763 mddev->metadata_type[namelen] = 0;
4764 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4765 mddev->metadata_type[--namelen] = 0;
4766 mddev->persistent = 0;
4767 mddev->external = 1;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004768 mddev->major_version = 0;
4769 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004770 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004771 }
4772 major = simple_strtoul(buf, &e, 10);
NeilBrown67918752014-12-15 12:57:01 +11004773 err = -EINVAL;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004774 if (e==buf || *e != '.')
NeilBrown67918752014-12-15 12:57:01 +11004775 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004776 buf = e+1;
4777 minor = simple_strtoul(buf, &e, 10);
NeilBrown3f9d7b02006-12-22 01:11:41 -08004778 if (e==buf || (*e && *e != '\n') )
NeilBrown67918752014-12-15 12:57:01 +11004779 goto out_unlock;
4780 err = -ENOENT;
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07004781 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004782 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004783 mddev->major_version = major;
4784 mddev->minor_version = minor;
4785 mddev->persistent = 1;
NeilBrowne6910632008-02-06 01:39:51 -08004786 mddev->external = 0;
NeilBrown67918752014-12-15 12:57:01 +11004787 err = 0;
4788out_unlock:
4789 mddev_unlock(mddev);
4790 return err ?: len;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004791}
4792
4793static struct md_sysfs_entry md_metadata =
NeilBrown750f1992014-09-30 08:53:05 +10004794__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
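/*
 * Values metadata_store() accepts, per the parsing above: "none",
 * "external:<type>", or "N.M" such as "1.2"; apart from rewriting the
 * details of an already 'external' array, all of these require that no
 * devices be attached.
 */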
NeilBrown8bb93aa2006-01-06 00:20:50 -08004795
NeilBrowna35b0d62006-01-06 00:20:49 -08004796static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004797action_show(struct mddev *mddev, char *page)
NeilBrown24dd4692005-11-08 21:39:26 -08004798{
NeilBrown7eec3142005-11-08 21:39:44 -08004799 char *type = "idle";
NeilBrownb7b17c92014-12-15 12:56:59 +11004800 unsigned long recovery = mddev->recovery;
4801 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
NeilBrownb6a9ce62009-05-26 09:41:17 +10004802 type = "frozen";
NeilBrownb7b17c92014-12-15 12:56:59 +11004803 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4804 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4805 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
NeilBrownccfcc3c2006-03-27 01:18:09 -08004806 type = "reshape";
NeilBrownb7b17c92014-12-15 12:56:59 +11004807 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4808 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004809 type = "resync";
NeilBrownb7b17c92014-12-15 12:56:59 +11004810 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004811 type = "check";
4812 else
4813 type = "repair";
NeilBrownb7b17c92014-12-15 12:56:59 +11004814 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004815 type = "recover";
NeilBrown985ca972015-07-06 12:26:57 +10004816 else if (mddev->reshape_position != MaxSector)
4817 type = "reshape";
NeilBrown24dd4692005-11-08 21:39:26 -08004818 }
4819 return sprintf(page, "%s\n", type);
4820}
4821
4822static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004823action_store(struct mddev *mddev, const char *page, size_t len)
NeilBrown24dd4692005-11-08 21:39:26 -08004824{
NeilBrown7eec3142005-11-08 21:39:44 -08004825 if (!mddev->pers || !mddev->pers->sync_request)
4826 return -EINVAL;
4827
4829 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004830 if (cmd_match(page, "frozen"))
4831 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4832 else
4833 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown8e8e2512015-06-12 19:51:27 +10004834 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4835 mddev_lock(mddev) == 0) {
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004836 if (work_pending(&mddev->del_work))
4837 flush_workqueue(md_misc_wq);
NeilBrown8e8e2512015-06-12 19:51:27 +10004838 if (mddev->sync_thread) {
4839 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown67918752014-12-15 12:57:01 +11004840 md_reap_sync_thread(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004841 }
NeilBrown8e8e2512015-06-12 19:51:27 +10004842 mddev_unlock(mddev);
NeilBrown7eec3142005-11-08 21:39:44 -08004843 }
NeilBrown312045e2015-12-21 11:01:21 +11004844 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004845 return -EBUSY;
Neil Brown72a23c22008-06-28 08:31:41 +10004846 else if (cmd_match(page, "resync"))
NeilBrown56ccc112015-05-28 17:53:29 +10004847 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004848 else if (cmd_match(page, "recover")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004849 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004850 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004851 } else if (cmd_match(page, "reshape")) {
NeilBrown16484bf2006-03-27 01:18:13 -08004852 int err;
4853 if (mddev->pers->start_reshape == NULL)
4854 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11004855 err = mddev_lock(mddev);
4856 if (!err) {
NeilBrown312045e2015-12-21 11:01:21 +11004857 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4858 err = -EBUSY;
4859 else {
4860 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4861 err = mddev->pers->start_reshape(mddev);
4862 }
NeilBrown67918752014-12-15 12:57:01 +11004863 mddev_unlock(mddev);
4864 }
NeilBrown16484bf2006-03-27 01:18:13 -08004865 if (err)
4866 return err;
Junxiao Bie1a86db2020-07-14 16:10:26 -07004867 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrown16484bf2006-03-27 01:18:13 -08004868 } else {
NeilBrownbce74da2006-01-06 00:20:41 -08004869 if (cmd_match(page, "check"))
NeilBrown7eec3142005-11-08 21:39:44 -08004870 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
NeilBrown2adc7d42006-05-20 14:59:57 -07004871 else if (!cmd_match(page, "repair"))
NeilBrown7eec3142005-11-08 21:39:44 -08004872 return -EINVAL;
NeilBrown56ccc112015-05-28 17:53:29 +10004873 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004874 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4875 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004876 }
NeilBrown48c26dd2012-10-11 14:19:39 +11004877 if (mddev->ro == 2) {
4878 /* A write to sync_action is enough to justify
4879 * canceling read-auto mode
4880 */
4881 mddev->ro = 0;
4882 md_wakeup_thread(mddev->sync_thread);
4883 }
NeilBrown03c902e2006-01-06 00:20:46 -08004884 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08004885 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10004886 sysfs_notify_dirent_safe(mddev->sysfs_action);
NeilBrown24dd4692005-11-08 21:39:26 -08004887 return len;
4888}
4889
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004890static struct md_sysfs_entry md_scan_mode =
NeilBrown750f1992014-09-30 08:53:05 +10004891__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
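/*
 * A minimal userspace sketch (array name "md0" assumed) of starting a
 * scrub through action_store() above; writing "repair" instead of
 * "check" would also rewrite mismatched blocks.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);

	if (fd < 0)
		return 1;
	/* action_store() maps "check" to MD_RECOVERY_CHECK|REQUESTED|SYNC */
	if (write(fd, "check", strlen("check")) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}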
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004892
4893static ssize_t
4894last_sync_action_show(struct mddev *mddev, char *page)
4895{
4896 return sprintf(page, "%s\n", mddev->last_sync_action);
4897}
4898
4899static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4900
NeilBrown9d888832005-11-08 21:39:26 -08004901static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004902mismatch_cnt_show(struct mddev *mddev, char *page)
NeilBrown9d888832005-11-08 21:39:26 -08004903{
4904 return sprintf(page, "%llu\n",
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11004905 (unsigned long long)
4906 atomic64_read(&mddev->resync_mismatches));
NeilBrown9d888832005-11-08 21:39:26 -08004907}
4908
NeilBrown80ca3a42006-07-10 04:44:18 -07004909static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
NeilBrown9d888832005-11-08 21:39:26 -08004910
NeilBrown88202a02006-01-06 00:21:36 -08004911static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004912sync_min_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004913{
4914 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4915 mddev->sync_speed_min ? "local": "system");
4916}
4917
4918static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004919sync_min_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004920{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004921 unsigned int min;
4922 int rv;
4923
NeilBrown88202a02006-01-06 00:21:36 -08004924 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004925 min = 0;
4926 } else {
4927 rv = kstrtouint(buf, 10, &min);
4928 if (rv < 0)
4929 return rv;
4930 if (min == 0)
4931 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004932 }
NeilBrown88202a02006-01-06 00:21:36 -08004933 mddev->sync_speed_min = min;
4934 return len;
4935}
4936
4937static struct md_sysfs_entry md_sync_min =
4938__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4939
4940static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004941sync_max_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004942{
4943 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4944 mddev->sync_speed_max ? "local": "system");
4945}
4946
4947static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004948sync_max_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004949{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004950 unsigned int max;
4951 int rv;
4952
NeilBrown88202a02006-01-06 00:21:36 -08004953 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004954 max = 0;
4955 } else {
4956 rv = kstrtouint(buf, 10, &max);
4957 if (rv < 0)
4958 return rv;
4959 if (max == 0)
4960 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004961 }
NeilBrown88202a02006-01-06 00:21:36 -08004962 mddev->sync_speed_max = max;
4963 return len;
4964}
4965
4966static struct md_sysfs_entry md_sync_max =
4967__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
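/*
 * sync_speed_min/sync_speed_max take a KB/sec figure or the word
 * "system"; a number installs a per-array override, while "system"
 * (stored as 0) falls back to the global speed_limit_min/speed_limit_max
 * settings.
 */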
4968
Iustin Popd7f3d292007-10-16 23:30:54 -07004969static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004970degraded_show(struct mddev *mddev, char *page)
Iustin Popd7f3d292007-10-16 23:30:54 -07004971{
4972 return sprintf(page, "%d\n", mddev->degraded);
4973}
4974static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
NeilBrown88202a02006-01-06 00:21:36 -08004975
4976static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004977sync_force_parallel_show(struct mddev *mddev, char *page)
Bernd Schubert90b08712008-05-23 13:04:38 -07004978{
4979 return sprintf(page, "%d\n", mddev->parallel_resync);
4980}
4981
4982static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004983sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
Bernd Schubert90b08712008-05-23 13:04:38 -07004984{
4985 long n;
4986
Jingoo Hanb29bebd2013-06-01 16:15:16 +09004987 if (kstrtol(buf, 10, &n))
Bernd Schubert90b08712008-05-23 13:04:38 -07004988 return -EINVAL;
4989
4990 if (n != 0 && n != 1)
4991 return -EINVAL;
4992
4993 mddev->parallel_resync = n;
4994
4995 if (mddev->sync_thread)
4996 wake_up(&resync_wait);
4997
4998 return len;
4999}
5000
5001/* force parallel resync, even with shared block devices */
5002static struct md_sysfs_entry md_sync_force_parallel =
5003__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5004 sync_force_parallel_show, sync_force_parallel_store);
5005
5006static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005007sync_speed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005008{
5009 unsigned long resync, dt, db;
NeilBrownd1a7c502009-03-31 15:24:32 +11005010 if (mddev->curr_resync == 0)
5011 return sprintf(page, "none\n");
Andre Noll9687a602008-03-25 22:24:09 +01005012 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5013 dt = (jiffies - mddev->resync_mark) / HZ;
NeilBrown88202a02006-01-06 00:21:36 -08005014 if (!dt) dt++;
Andre Noll9687a602008-03-25 22:24:09 +01005015 db = resync - mddev->resync_mark_cnt;
5016 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
NeilBrown88202a02006-01-06 00:21:36 -08005017}
5018
NeilBrown80ca3a42006-07-10 04:44:18 -07005019static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
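/*
 * Worked example for sync_speed_show(): with resync_mark set 10 seconds
 * ago (dt = 10) and db = 204800 sectors moved since then, the file
 * reports 204800/10/2 = 10240 KB/sec; the "/2" converts 512-byte
 * sectors to K.
 */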
NeilBrown88202a02006-01-06 00:21:36 -08005020
5021static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005022sync_completed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005023{
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005024 unsigned long long max_sectors, resync;
NeilBrown88202a02006-01-06 00:21:36 -08005025
NeilBrownacb180b2009-04-14 16:28:34 +10005026 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5027 return sprintf(page, "none\n");
5028
NeilBrown72f36d52012-10-11 14:25:57 +11005029 if (mddev->curr_resync == 1 ||
5030 mddev->curr_resync == 2)
5031 return sprintf(page, "delayed\n");
5032
NeilBrownc804cde2012-05-21 09:28:33 +10005033 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5034 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Andre Noll58c0fed2009-03-31 14:33:13 +11005035 max_sectors = mddev->resync_max_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005036 else
Andre Noll58c0fed2009-03-31 14:33:13 +11005037 max_sectors = mddev->dev_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005038
NeilBrownacb180b2009-04-14 16:28:34 +10005039 resync = mddev->curr_resync_completed;
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005040 return sprintf(page, "%llu / %llu\n", resync, max_sectors);
NeilBrown88202a02006-01-06 00:21:36 -08005041}
5042
NeilBrown750f1992014-09-30 08:53:05 +10005043static struct md_sysfs_entry md_sync_completed =
5044 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
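/*
 * sync_completed reads as "<done> / <total>" in sectors, so a (made-up)
 * reading of "4096128 / 976630272" means the resync is roughly 0.42%
 * done (4096128 * 100 / 976630272).
 */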
NeilBrown88202a02006-01-06 00:21:36 -08005045
NeilBrowne464eaf2006-03-27 01:18:14 -08005046static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005047min_sync_show(struct mddev *mddev, char *page)
Neil Brown5e96ee62008-06-28 08:31:24 +10005048{
5049 return sprintf(page, "%llu\n",
5050 (unsigned long long)mddev->resync_min);
5051}
5052static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005053min_sync_store(struct mddev *mddev, const char *buf, size_t len)
Neil Brown5e96ee62008-06-28 08:31:24 +10005054{
5055 unsigned long long min;
NeilBrown23da4222014-12-15 12:57:01 +11005056 int err;
NeilBrown23da4222014-12-15 12:57:01 +11005057
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005058 if (kstrtoull(buf, 10, &min))
Neil Brown5e96ee62008-06-28 08:31:24 +10005059 return -EINVAL;
NeilBrown23da4222014-12-15 12:57:01 +11005060
5061 spin_lock(&mddev->lock);
5062 err = -EINVAL;
Neil Brown5e96ee62008-06-28 08:31:24 +10005063 if (min > mddev->resync_max)
NeilBrown23da4222014-12-15 12:57:01 +11005064 goto out_unlock;
5065
5066 err = -EBUSY;
Neil Brown5e96ee62008-06-28 08:31:24 +10005067 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005068 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005069
NeilBrown50c37b12015-03-23 17:36:38 +11005070 /* Round down to multiple of 4K for safety */
5071 mddev->resync_min = round_down(min, 8);
NeilBrown23da4222014-12-15 12:57:01 +11005072 err = 0;
Neil Brown5e96ee62008-06-28 08:31:24 +10005073
NeilBrown23da4222014-12-15 12:57:01 +11005074out_unlock:
5075 spin_unlock(&mddev->lock);
5076 return err ?: len;
Neil Brown5e96ee62008-06-28 08:31:24 +10005077}
5078
5079static struct md_sysfs_entry md_min_sync =
5080__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5081
5082static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005083max_sync_show(struct mddev *mddev, char *page)
NeilBrownc6207272008-02-06 01:39:52 -08005084{
5085 if (mddev->resync_max == MaxSector)
5086 return sprintf(page, "max\n");
5087 else
5088 return sprintf(page, "%llu\n",
5089 (unsigned long long)mddev->resync_max);
5090}
5091static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005092max_sync_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownc6207272008-02-06 01:39:52 -08005093{
NeilBrown23da4222014-12-15 12:57:01 +11005094 int err;
5095 spin_lock(&mddev->lock);
NeilBrownc6207272008-02-06 01:39:52 -08005096 if (strncmp(buf, "max", 3) == 0)
5097 mddev->resync_max = MaxSector;
5098 else {
Neil Brown5e96ee62008-06-28 08:31:24 +10005099 unsigned long long max;
NeilBrown23da4222014-12-15 12:57:01 +11005100 int chunk;
5101
5102 err = -EINVAL;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005103 if (kstrtoull(buf, 10, &max))
NeilBrown23da4222014-12-15 12:57:01 +11005104 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005105 if (max < mddev->resync_min)
NeilBrown23da4222014-12-15 12:57:01 +11005106 goto out_unlock;
5107
5108 err = -EBUSY;
NeilBrownc6207272008-02-06 01:39:52 -08005109 if (max < mddev->resync_max &&
NeilBrown4d484a42009-08-13 10:41:50 +10005110 mddev->ro == 0 &&
NeilBrownc6207272008-02-06 01:39:52 -08005111 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005112 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005113
5114 /* Must be a multiple of chunk_size */
NeilBrown23da4222014-12-15 12:57:01 +11005115 chunk = mddev->chunk_sectors;
5116 if (chunk) {
raz ben yehuda2ac06c32009-06-16 17:01:42 +10005117 sector_t temp = max;
NeilBrown23da4222014-12-15 12:57:01 +11005118
5119 err = -EINVAL;
5120 if (sector_div(temp, chunk))
5121 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005122 }
5123 mddev->resync_max = max;
5124 }
5125 wake_up(&mddev->recovery_wait);
NeilBrown23da4222014-12-15 12:57:01 +11005126 err = 0;
5127out_unlock:
5128 spin_unlock(&mddev->lock);
5129 return err ?: len;
NeilBrownc6207272008-02-06 01:39:52 -08005130}
5131
5132static struct md_sysfs_entry md_max_sync =
5133__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
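/*
 * Taken together, sync_min and sync_max bound the sector range a resync
 * may cover: sync_min is rounded down to a 4K multiple, sync_max must be
 * "max" or, when a chunk size is set, a multiple of it, and changes that
 * would conflict with a running resync fail with -EBUSY above.
 */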
5134
5135static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005136suspend_lo_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005137{
5138 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5139}
5140
5141static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005142suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005143{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005144 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005145 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005146
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005147 err = kstrtoull(buf, 10, &new);
5148 if (err < 0)
5149 return err;
5150 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005151 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005152
NeilBrown67918752014-12-15 12:57:01 +11005153 err = mddev_lock(mddev);
5154 if (err)
5155 return err;
5156 err = -EINVAL;
5157 if (mddev->pers == NULL ||
5158 mddev->pers->quiesce == NULL)
5159 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005160 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005161 mddev->suspend_lo = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005162 mddev_resume(mddev);
5163
NeilBrown67918752014-12-15 12:57:01 +11005164 err = 0;
5165unlock:
5166 mddev_unlock(mddev);
5167 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005168}
5169static struct md_sysfs_entry md_suspend_lo =
5170__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5171
NeilBrowne464eaf2006-03-27 01:18:14 -08005172static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005173suspend_hi_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005174{
5175 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5176}
5177
5178static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005179suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005180{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005181 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005182 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005183
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005184 err = kstrtoull(buf, 10, &new);
5185 if (err < 0)
5186 return err;
5187 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005188 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005189
NeilBrown67918752014-12-15 12:57:01 +11005190 err = mddev_lock(mddev);
5191 if (err)
5192 return err;
5193 err = -EINVAL;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005194 if (mddev->pers == NULL)
NeilBrown67918752014-12-15 12:57:01 +11005195 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005196
5197 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005198 mddev->suspend_hi = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005199 mddev_resume(mddev);
5200
NeilBrown67918752014-12-15 12:57:01 +11005201 err = 0;
5202unlock:
5203 mddev_unlock(mddev);
5204 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005205}
5206static struct md_sysfs_entry md_suspend_hi =
5207__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
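/*
 * suspend_lo/suspend_hi delimit a suspended region in sectors, and each
 * store above goes through a full mddev_suspend()/mddev_resume() cycle.
 * For example, suspend_lo = 0 and suspend_hi = 8192 holds I/O to the
 * first 4 MiB while metadata is manipulated externally.
 */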
5208
NeilBrown08a02ec2007-05-09 02:35:38 -07005209static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005210reshape_position_show(struct mddev *mddev, char *page)
NeilBrown08a02ec2007-05-09 02:35:38 -07005211{
5212 if (mddev->reshape_position != MaxSector)
5213 return sprintf(page, "%llu\n",
5214 (unsigned long long)mddev->reshape_position);
5215 strcpy(page, "none\n");
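	/* 5 below == strlen("none\n") */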
5216 return 5;
5217}
5218
5219static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005220reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown08a02ec2007-05-09 02:35:38 -07005221{
NeilBrownc6563a82012-05-21 09:27:00 +10005222 struct md_rdev *rdev;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005223 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005224 int err;
NeilBrown67918752014-12-15 12:57:01 +11005225
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005226 err = kstrtoull(buf, 10, &new);
5227 if (err < 0)
5228 return err;
5229 if (new != (sector_t)new)
NeilBrown08a02ec2007-05-09 02:35:38 -07005230 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11005231 err = mddev_lock(mddev);
5232 if (err)
5233 return err;
5234 err = -EBUSY;
5235 if (mddev->pers)
5236 goto unlock;
NeilBrown08a02ec2007-05-09 02:35:38 -07005237 mddev->reshape_position = new;
5238 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10005239 mddev->reshape_backwards = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07005240 mddev->new_level = mddev->level;
5241 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10005242 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10005243 rdev_for_each(rdev, mddev)
5244 rdev->new_data_offset = rdev->data_offset;
NeilBrown67918752014-12-15 12:57:01 +11005245 err = 0;
5246unlock:
5247 mddev_unlock(mddev);
5248 return err ?: len;
NeilBrown08a02ec2007-05-09 02:35:38 -07005249}
5250
5251static struct md_sysfs_entry md_reshape_position =
5252__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5253 reshape_position_store);
5254
Dan Williamsb522adc2009-03-31 15:00:31 +11005255static ssize_t
NeilBrown2c810cd2012-05-21 09:27:00 +10005256reshape_direction_show(struct mddev *mddev, char *page)
5257{
5258 return sprintf(page, "%s\n",
5259 mddev->reshape_backwards ? "backwards" : "forwards");
5260}
5261
5262static ssize_t
5263reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5264{
5265 int backwards = 0;
NeilBrown67918752014-12-15 12:57:01 +11005266 int err;
5267
NeilBrown2c810cd2012-05-21 09:27:00 +10005268 if (cmd_match(buf, "forwards"))
5269 backwards = 0;
5270 else if (cmd_match(buf, "backwards"))
5271 backwards = 1;
5272 else
5273 return -EINVAL;
5274 if (mddev->reshape_backwards == backwards)
5275 return len;
5276
NeilBrown67918752014-12-15 12:57:01 +11005277 err = mddev_lock(mddev);
5278 if (err)
5279 return err;
NeilBrown2c810cd2012-05-21 09:27:00 +10005280 /* check if we are allowed to change */
5281 if (mddev->delta_disks)
NeilBrown67918752014-12-15 12:57:01 +11005282 err = -EBUSY;
5283 else if (mddev->persistent &&
NeilBrown2c810cd2012-05-21 09:27:00 +10005284 mddev->major_version == 0)
NeilBrown67918752014-12-15 12:57:01 +11005285 err = -EINVAL;
5286 else
5287 mddev->reshape_backwards = backwards;
5288 mddev_unlock(mddev);
5289 return err ?: len;
NeilBrown2c810cd2012-05-21 09:27:00 +10005290}
5291
5292static struct md_sysfs_entry md_reshape_direction =
5293__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5294 reshape_direction_store);
5295
5296static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005297array_size_show(struct mddev *mddev, char *page)
Dan Williamsb522adc2009-03-31 15:00:31 +11005298{
5299 if (mddev->external_size)
5300 return sprintf(page, "%llu\n",
5301 (unsigned long long)mddev->array_sectors/2);
5302 else
5303 return sprintf(page, "default\n");
5304}
5305
5306static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005307array_size_store(struct mddev *mddev, const char *buf, size_t len)
Dan Williamsb522adc2009-03-31 15:00:31 +11005308{
5309 sector_t sectors;
NeilBrown67918752014-12-15 12:57:01 +11005310 int err;
5311
5312 err = mddev_lock(mddev);
5313 if (err)
5314 return err;
Dan Williamsb522adc2009-03-31 15:00:31 +11005315
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005316 /* cluster raid doesn't support change array_sectors */
Zhilong Liub6708832017-04-10 14:15:55 +08005317 if (mddev_is_clustered(mddev)) {
5318 mddev_unlock(mddev);
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005319 return -EINVAL;
Zhilong Liub6708832017-04-10 14:15:55 +08005320 }
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005321
Dan Williamsb522adc2009-03-31 15:00:31 +11005322 if (strncmp(buf, "default", 7) == 0) {
5323 if (mddev->pers)
5324 sectors = mddev->pers->size(mddev, 0, 0);
5325 else
5326 sectors = mddev->array_sectors;
5327
5328 mddev->external_size = 0;
5329 } else {
5330 if (strict_blocks_to_sectors(buf, &sectors) < 0)
NeilBrown67918752014-12-15 12:57:01 +11005331 err = -EINVAL;
5332 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5333 err = -E2BIG;
5334 else
5335 mddev->external_size = 1;
Dan Williamsb522adc2009-03-31 15:00:31 +11005336 }
5337
NeilBrown67918752014-12-15 12:57:01 +11005338 if (!err) {
5339 mddev->array_sectors = sectors;
Christoph Hellwig2c247c52020-11-16 15:57:11 +01005340 if (mddev->pers)
5341 set_capacity_and_notify(mddev->gendisk,
5342 mddev->array_sectors);
NeilBrowncbe6ef12011-02-16 13:58:38 +11005343 }
NeilBrown67918752014-12-15 12:57:01 +11005344 mddev_unlock(mddev);
5345 return err ?: len;
Dan Williamsb522adc2009-03-31 15:00:31 +11005346}
5347
5348static struct md_sysfs_entry md_array_size =
5349__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5350 array_size_store);
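/*
 * array_size accepts "default" (revert to the personality's computed
 * pers->size()) or a value in 1K blocks; on an active array a value
 * larger than the computed size fails with -E2BIG, and clustered arrays
 * reject any change outright.
 */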
NeilBrowne464eaf2006-03-27 01:18:14 -08005351
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005352static ssize_t
5353consistency_policy_show(struct mddev *mddev, char *page)
5354{
5355 int ret;
5356
5357 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5358 ret = sprintf(page, "journal\n");
5359 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5360 ret = sprintf(page, "ppl\n");
5361 } else if (mddev->bitmap) {
5362 ret = sprintf(page, "bitmap\n");
5363 } else if (mddev->pers) {
5364 if (mddev->pers->sync_request)
5365 ret = sprintf(page, "resync\n");
5366 else
5367 ret = sprintf(page, "none\n");
5368 } else {
5369 ret = sprintf(page, "unknown\n");
5370 }
5371
5372 return ret;
5373}
5374
5375static ssize_t
5376consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5377{
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005378 int err = 0;
5379
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005380 if (mddev->pers) {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005381 if (mddev->pers->change_consistency_policy)
5382 err = mddev->pers->change_consistency_policy(mddev, buf);
5383 else
5384 err = -EBUSY;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005385 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5386 set_bit(MD_HAS_PPL, &mddev->flags);
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005387 } else {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005388 err = -EINVAL;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005389 }
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005390
5391 return err ? err : len;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005392}
5393
5394static struct md_sysfs_entry md_consistency_policy =
5395__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5396 consistency_policy_store);
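/*
 * Example transition consistency_policy_store() permits: writing "ppl"
 * to an inactive, externally-managed array just sets MD_HAS_PPL, while
 * on a running array the request is forwarded to
 * pers->change_consistency_policy() (implemented by raid5).
 */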
5397
Guoqing Jiang9a567842019-07-24 11:09:19 +02005398static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5399{
5400 return sprintf(page, "%d\n", mddev->fail_last_dev);
5401}
5402
5403/*
5404 * Setting fail_last_dev to true allows the last device to be forcibly
5405 * removed from RAID1/RAID10.
5406 */
5407static ssize_t
5408fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5409{
5410 int ret;
5411 bool value;
5412
5413 ret = kstrtobool(buf, &value);
5414 if (ret)
5415 return ret;
5416
5417 if (value != mddev->fail_last_dev)
5418 mddev->fail_last_dev = value;
5419
5420 return len;
5421}
5422static struct md_sysfs_entry md_fail_last_dev =
5423__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5424 fail_last_dev_store);
5425
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005426static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5427{
5428 if (mddev->pers == NULL || (mddev->pers->level != 1))
5429 return sprintf(page, "n/a\n");
5430 else
5431 return sprintf(page, "%d\n", mddev->serialize_policy);
5432}
5433
5434/*
5435 * Setting serialize_policy to true enforces that write IO is not
5436 * reordered for raid1.
5437 */
5438static ssize_t
5439serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5440{
5441 int err;
5442 bool value;
5443
5444 err = kstrtobool(buf, &value);
5445 if (err)
5446 return err;
5447
5448 if (value == mddev->serialize_policy)
5449 return len;
5450
5451 err = mddev_lock(mddev);
5452 if (err)
5453 return err;
5454 if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5455 pr_err("md: serialize_policy is only effective for raid1\n");
5456 err = -EINVAL;
5457 goto unlock;
5458 }
5459
5460 mddev_suspend(mddev);
5461 if (value)
5462 mddev_create_serial_pool(mddev, NULL, true);
5463 else
5464 mddev_destroy_serial_pool(mddev, NULL, true);
5465 mddev->serialize_policy = value;
5466 mddev_resume(mddev);
5467unlock:
5468 mddev_unlock(mddev);
5469 return err ?: len;
5470}
5471
5472static struct md_sysfs_entry md_serialize_policy =
5473__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5474 serialize_policy_store);
5475
5476
NeilBrowneae17012005-11-08 21:39:23 -08005477static struct attribute *md_default_attrs[] = {
5478 &md_level.attr,
NeilBrownd4dbd022006-06-26 00:27:59 -07005479 &md_layout.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005480 &md_raid_disks.attr,
Sebastian Parschauerec164d072020-07-28 12:01:39 +02005481 &md_uuid.attr,
NeilBrown3b343802006-01-06 00:20:47 -08005482 &md_chunk_size.attr,
NeilBrowna35b0d62006-01-06 00:20:49 -08005483 &md_size.attr,
NeilBrowna94213b2006-06-26 00:28:00 -07005484 &md_resync_start.attr,
NeilBrown8bb93aa2006-01-06 00:20:50 -08005485 &md_metadata.attr,
NeilBrown6d7ff7382006-01-06 00:21:16 -08005486 &md_new_device.attr,
NeilBrown16f17b32006-06-26 00:27:37 -07005487 &md_safe_delay.attr,
NeilBrown9e653b62006-06-26 00:27:58 -07005488 &md_array_state.attr,
NeilBrown08a02ec2007-05-09 02:35:38 -07005489 &md_reshape_position.attr,
NeilBrown2c810cd2012-05-21 09:27:00 +10005490 &md_reshape_direction.attr,
Dan Williamsb522adc2009-03-31 15:00:31 +11005491 &md_array_size.attr,
Robert Becker1e509152009-12-14 12:49:58 +11005492 &max_corr_read_errors.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005493 &md_consistency_policy.attr,
Guoqing Jiang9a567842019-07-24 11:09:19 +02005494 &md_fail_last_dev.attr,
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005495 &md_serialize_policy.attr,
NeilBrown411036f2005-11-08 21:39:40 -08005496 NULL,
5497};
5498
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005499static const struct attribute_group md_default_group = {
5500 .attrs = md_default_attrs,
5501};
5502
NeilBrown411036f2005-11-08 21:39:40 -08005503static struct attribute *md_redundancy_attrs[] = {
NeilBrown24dd4692005-11-08 21:39:26 -08005504 &md_scan_mode.attr,
Jonathan Brassowc4a39552013-06-25 01:23:59 -05005505 &md_last_scan_mode.attr,
NeilBrown9d888832005-11-08 21:39:26 -08005506 &md_mismatches.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005507 &md_sync_min.attr,
5508 &md_sync_max.attr,
5509 &md_sync_speed.attr,
Bernd Schubert90b08712008-05-23 13:04:38 -07005510 &md_sync_force_parallel.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005511 &md_sync_completed.attr,
Neil Brown5e96ee62008-06-28 08:31:24 +10005512 &md_min_sync.attr,
NeilBrownc6207272008-02-06 01:39:52 -08005513 &md_max_sync.attr,
NeilBrowne464eaf2006-03-27 01:18:14 -08005514 &md_suspend_lo.attr,
5515 &md_suspend_hi.attr,
Paul Clements9b1d1da2006-10-03 01:15:49 -07005516 &md_bitmap.attr,
Iustin Popd7f3d292007-10-16 23:30:54 -07005517 &md_degraded.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005518 NULL,
5519};
Rikard Falkebornc32dc042021-05-29 12:30:49 +02005520static const struct attribute_group md_redundancy_group = {
NeilBrown411036f2005-11-08 21:39:40 -08005521 .name = NULL,
5522 .attrs = md_redundancy_attrs,
5523};
5524
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005525static const struct attribute_group *md_attr_groups[] = {
5526 &md_default_group,
5527 &md_bitmap_group,
5528 NULL,
5529};
5530
NeilBrowneae17012005-11-08 21:39:23 -08005531static ssize_t
5532md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5533{
5534 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005535 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005536 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005537
5538 if (!entry->show)
5539 return -EIO;
NeilBrownaf8a2432011-12-08 15:49:46 +11005540 spin_lock(&all_mddevs_lock);
5541 if (list_empty(&mddev->all_mddevs)) {
5542 spin_unlock(&all_mddevs_lock);
5543 return -EBUSY;
5544 }
5545 mddev_get(mddev);
5546 spin_unlock(&all_mddevs_lock);
5547
NeilBrownb7b17c92014-12-15 12:56:59 +11005548 rv = entry->show(mddev, page);
NeilBrownaf8a2432011-12-08 15:49:46 +11005549 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005550 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005551}
5552
5553static ssize_t
5554md_attr_store(struct kobject *kobj, struct attribute *attr,
5555 const char *page, size_t length)
5556{
5557 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005558 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005559 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005560
5561 if (!entry->store)
5562 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07005563 if (!capable(CAP_SYS_ADMIN))
5564 return -EACCES;
NeilBrownaf8a2432011-12-08 15:49:46 +11005565 spin_lock(&all_mddevs_lock);
5566 if (list_empty(&mddev->all_mddevs)) {
5567 spin_unlock(&all_mddevs_lock);
5568 return -EBUSY;
5569 }
5570 mddev_get(mddev);
5571 spin_unlock(&all_mddevs_lock);
NeilBrown67918752014-12-15 12:57:01 +11005572 rv = entry->store(mddev, page, length);
NeilBrownaf8a2432011-12-08 15:49:46 +11005573 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005574 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005575}
5576
5577static void md_free(struct kobject *ko)
5578{
NeilBrownfd01b882011-10-11 16:47:53 +11005579 struct mddev *mddev = container_of(ko, struct mddev, kobj);
NeilBrowna21d1502009-01-09 08:31:09 +11005580
5581 if (mddev->sysfs_state)
5582 sysfs_put(mddev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07005583 if (mddev->sysfs_level)
5584 sysfs_put(mddev->sysfs_level);
5585
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005586 if (mddev->gendisk) {
Bart Van Assched8115c352018-02-28 10:15:29 -08005587 del_gendisk(mddev->gendisk);
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005588 blk_cleanup_disk(mddev->gendisk);
5589 }
NeilBrown4ad23a972017-03-15 14:05:14 +11005590 percpu_ref_exit(&mddev->writes_pending);
NeilBrowna21d1502009-01-09 08:31:09 +11005591
Kent Overstreet28dec872018-06-07 20:52:54 -04005592 bioset_exit(&mddev->bio_set);
5593 bioset_exit(&mddev->sync_set);
Guoqing Jiangdaee2022021-06-03 17:21:06 +08005594 if (mddev->level != 1 && mddev->level != 10)
5595 bioset_exit(&mddev->io_acct_set);
NeilBrowneae17012005-11-08 21:39:23 -08005596 kfree(mddev);
5597}
5598
Emese Revfy52cf25d2010-01-19 02:58:23 +01005599static const struct sysfs_ops md_sysfs_ops = {
NeilBrowneae17012005-11-08 21:39:23 -08005600 .show = md_attr_show,
5601 .store = md_attr_store,
5602};
5603static struct kobj_type md_ktype = {
5604 .release = md_free,
5605 .sysfs_ops = &md_sysfs_ops,
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005606 .default_groups = md_attr_groups,
NeilBrowneae17012005-11-08 21:39:23 -08005607};
5608
Linus Torvalds1da177e2005-04-16 15:20:36 -07005609int mdp_major = 0;
5610
Dan Williams5fd3a172009-03-04 00:57:25 -07005611static void mddev_delayed_delete(struct work_struct *ws)
5612{
NeilBrownfd01b882011-10-11 16:47:53 +11005613 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Dan Williams5fd3a172009-03-04 00:57:25 -07005614
Dan Williams5fd3a172009-03-04 00:57:25 -07005615 kobject_del(&mddev->kobj);
5616 kobject_put(&mddev->kobj);
5617}
5618
NeilBrown4ad23a972017-03-15 14:05:14 +11005619static void no_op(struct percpu_ref *r) {}
5620
NeilBrowna415c0f2017-06-05 16:05:13 +10005621int mddev_init_writes_pending(struct mddev *mddev)
5622{
5623 if (mddev->writes_pending.percpu_count_ptr)
5624 return 0;
Roman Gushchinddde2af2019-05-07 10:01:49 -07005625 if (percpu_ref_init(&mddev->writes_pending, no_op,
5626 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
NeilBrowna415c0f2017-06-05 16:05:13 +10005627 return -ENOMEM;
5628 /* We want to start with the refcount at zero */
5629 percpu_ref_put(&mddev->writes_pending);
5630 return 0;
5631}
5632EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
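/*
 * Note on the percpu_ref above: percpu_ref_init() starts the counter at
 * one, so the immediate percpu_ref_put() leaves writes_pending counting
 * active writers from zero, and PERCPU_REF_ALLOW_REINIT lets the ref be
 * re-initialised when the array is restarted.
 */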
5633
NeilBrownefeb53c2009-01-09 08:31:10 +11005634static int md_alloc(dev_t dev, char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005635{
NeilBrown039b7222017-04-12 16:26:13 +10005636 /*
5637 * If dev is zero, name is the name of a device to allocate with
5638 * an arbitrary minor number. It will be "md_???"
5639 * If dev is non-zero it must be a device number with a MAJOR of
5640 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5641 * the device is being created by opening a node in /dev.
5642 * If "name" is not NULL, the device is being created by
5643 * writing to /sys/module/md_mod/parameters/new_array.
5644 */
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005645 static DEFINE_MUTEX(disks_mutex);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005646 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647 struct gendisk *disk;
NeilBrownefeb53c2009-01-09 08:31:10 +11005648 int partitioned;
5649 int shift;
5650 int unit;
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005651	int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005652
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005653 /*
5654 * Wait for any previous instance of this device to be completely
5655 * removed (mddev_delayed_delete).
NeilBrownd3374822009-01-09 08:31:10 +11005656 */
Tejun Heoe804ac72010-10-15 15:36:08 +02005657 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11005658
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005659 mutex_lock(&disks_mutex);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005660 mddev = mddev_alloc(dev);
5661 if (IS_ERR(mddev)) {
5662 mutex_unlock(&disks_mutex);
5663 return PTR_ERR(mddev);
5664 }
5665
5666 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5667 shift = partitioned ? MdpMinorShift : 0;
5668 unit = MINOR(mddev->unit) >> shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005669
NeilBrown039b7222017-04-12 16:26:13 +10005670 if (name && !dev) {
NeilBrownefeb53c2009-01-09 08:31:10 +11005671 /* Need to ensure that 'name' is not a duplicate.
5672 */
NeilBrownfd01b882011-10-11 16:47:53 +11005673 struct mddev *mddev2;
NeilBrownefeb53c2009-01-09 08:31:10 +11005674 spin_lock(&all_mddevs_lock);
5675
5676 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5677 if (mddev2->gendisk &&
5678 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5679 spin_unlock(&all_mddevs_lock);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005680 error = -EEXIST;
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005681 goto out_unlock_disks_mutex;
NeilBrownefeb53c2009-01-09 08:31:10 +11005682 }
5683 spin_unlock(&all_mddevs_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005684 }
NeilBrown039b7222017-04-12 16:26:13 +10005685 if (name && dev)
5686 /*
5687 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
5688 */
5689 mddev->hold_active = UNTIL_STOP;
NeilBrown8b765392009-01-09 08:31:08 +11005690
NeilBrown0909dc42009-07-01 12:27:21 +10005691 error = -ENOMEM;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005692 disk = blk_alloc_disk(NUMA_NO_NODE);
5693 if (!disk)
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005694 goto out_unlock_disks_mutex;
NeilBrown409c57f2009-03-31 14:39:39 +11005695
NeilBrownefeb53c2009-01-09 08:31:10 +11005696 disk->major = MAJOR(mddev->unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005697 disk->first_minor = unit << shift;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005698 disk->minors = 1 << shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005699 if (name)
5700 strcpy(disk->disk_name, name);
5701 else if (partitioned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005702 sprintf(disk->disk_name, "md_d%d", unit);
Greg Kroah-Hartmance7b0f462005-06-20 21:15:16 -07005703 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005704 sprintf(disk->disk_name, "md%d", unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005705 disk->fops = &md_fops;
5706 disk->private_data = mddev;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005707
5708 mddev->queue = disk->queue;
5709 blk_set_stacking_limits(&mddev->queue->limits);
Jens Axboe56883a72016-03-30 10:16:53 -06005710 blk_queue_write_cache(mddev->queue, true, true);
NeilBrown92850bb2008-10-21 13:25:32 +11005711 /* Allow extended partitions. This makes the
NeilBrownd3374822009-01-09 08:31:10 +11005712 * 'mdp' device redundant, but we can't really
NeilBrown92850bb2008-10-21 13:25:32 +11005713 * remove it now.
5714 */
5715 disk->flags |= GENHD_FL_EXT_DEVT;
Christoph Hellwiga564e232020-07-08 14:25:41 +02005716 disk->events |= DISK_EVENT_MEDIA_CHANGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005717 mddev->gendisk = disk;
Luis Chamberlain9be68dd2021-09-01 13:38:30 +02005718 error = add_disk(disk);
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005719 if (error)
5720 goto out_cleanup_disk;
NeilBrownb0140892011-05-10 17:49:01 +10005721
Kent Overstreet28dec872018-06-07 20:52:54 -04005722 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005723 if (error)
5724 goto out_del_gendisk;
5725
5726 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5727 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5728 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
5729 goto out_unlock_disks_mutex;
5730
5731out_del_gendisk:
5732 del_gendisk(disk);
5733out_cleanup_disk:
5734 blk_cleanup_disk(disk);
5735out_unlock_disks_mutex:
Christoph Hellwig94f3cd72021-09-01 13:38:32 +02005736 mutex_unlock(&disks_mutex);
NeilBrownd3374822009-01-09 08:31:10 +11005737 mddev_put(mddev);
NeilBrown0909dc42009-07-01 12:27:21 +10005738 return error;
NeilBrownefeb53c2009-01-09 08:31:10 +11005739}
5740
Christoph Hellwig28144f92020-10-29 15:58:34 +01005741static void md_probe(dev_t dev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005742{
Christoph Hellwig28144f92020-10-29 15:58:34 +01005743 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5744 return;
NeilBrown78b63502017-04-12 16:26:13 +10005745 if (create_on_open)
5746 md_alloc(dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005747}
5748
Kees Cooke4dca7b2017-10-17 19:04:42 -07005749static int add_named_array(const char *val, const struct kernel_param *kp)
NeilBrownefeb53c2009-01-09 08:31:10 +11005750{
NeilBrown039b7222017-04-12 16:26:13 +10005751 /*
5752 * val must be "md_*" or "mdNNN".
5753 * For "md_*" we allocate an array with a large free minor number, and
NeilBrownefeb53c2009-01-09 08:31:10 +11005754 * set the name to val. val must not already be an active name.
NeilBrown039b7222017-04-12 16:26:13 +10005755 * For "mdNNN" we allocate an array with the minor number NNN
5756 * which must not already be in use.
NeilBrownefeb53c2009-01-09 08:31:10 +11005757 */
5758 int len = strlen(val);
5759 char buf[DISK_NAME_LEN];
NeilBrown039b7222017-04-12 16:26:13 +10005760 unsigned long devnum;
NeilBrownefeb53c2009-01-09 08:31:10 +11005761
5762 while (len && val[len-1] == '\n')
5763 len--;
5764 if (len >= DISK_NAME_LEN)
5765 return -E2BIG;
5766 strlcpy(buf, val, len+1);
NeilBrown039b7222017-04-12 16:26:13 +10005767 if (strncmp(buf, "md_", 3) == 0)
5768 return md_alloc(0, buf);
5769 if (strncmp(buf, "md", 2) == 0 &&
5770 isdigit(buf[2]) &&
5771 kstrtoul(buf+2, 10, &devnum) == 0 &&
5772 devnum <= MINORMASK)
5773 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5774
5775 return -EINVAL;
NeilBrownefeb53c2009-01-09 08:31:10 +11005776}
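/*
 * A minimal userspace sketch of reaching add_named_array() (the array
 * name "md_home" is an example): names must be "md_*" or "mdNNN".
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/md_mod/parameters/new_array", "w");

	if (!f)
		return 1;
	fprintf(f, "md_home\n");	/* reaches md_alloc(0, "md_home") */
	return fclose(f) ? 1 : 0;
}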
5777
Kees Cook8376d3c2017-10-16 17:01:48 -07005778static void md_safemode_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005779{
Kees Cook8376d3c2017-10-16 17:01:48 -07005780 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005781
NeilBrown4ad23a972017-03-15 14:05:14 +11005782 mddev->safemode = 1;
5783 if (mddev->external)
5784 sysfs_notify_dirent_safe(mddev->sysfs_state);
5785
Linus Torvalds1da177e2005-04-16 15:20:36 -07005786 md_wakeup_thread(mddev->thread);
5787}
5788
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005789static int start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005790
NeilBrownfd01b882011-10-11 16:47:53 +11005791int md_run(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005792{
NeilBrown2604b702006-01-06 00:20:36 -08005793 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11005794 struct md_rdev *rdev;
NeilBrown84fc4b52011-10-11 16:49:58 +11005795 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005796
NeilBrowna757e642005-04-16 15:26:42 -07005797 if (list_empty(&mddev->disks))
5798		/* cannot run an array with no devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005799 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005800
5801 if (mddev->pers)
5802 return -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10005803 /* Cannot run until previous stop completes properly */
5804 if (mddev->sysfs_active)
5805 return -EBUSY;
NeilBrownb6eb1272010-04-15 10:13:47 +10005806
Linus Torvalds1da177e2005-04-16 15:20:36 -07005807 /*
5808 * Analyze all RAID superblock(s)
5809 */
NeilBrown1ec4a932008-02-06 01:39:53 -08005810 if (!mddev->raid_disks) {
5811 if (!mddev->persistent)
5812 return -EINVAL;
Yufen Yu6a5cb532019-10-16 16:00:03 +08005813 err = analyze_sbs(mddev);
5814 if (err)
5815 return -EINVAL;
NeilBrown1ec4a932008-02-06 01:39:53 -08005816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005817
NeilBrownd9d166c2006-01-06 00:20:51 -08005818 if (mddev->level != LEVEL_NONE)
5819 request_module("md-level-%d", mddev->level);
5820 else if (mddev->clevel[0])
5821 request_module("md-%s", mddev->clevel);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005822
5823 /*
5824 * Drop all container device buffers, from now on
5825 * the only valid external interface is through the md
5826 * device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005827 */
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005828 mddev->has_superblocks = false;
NeilBrowndafb20f2012-03-19 12:46:39 +11005829 rdev_for_each(rdev, mddev) {
NeilBrownb2d444d2005-11-08 21:39:31 -08005830 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005831 continue;
5832 sync_blockdev(rdev->bdev);
Peter Zijlstraf98393a2007-05-06 14:49:54 -07005833 invalidate_bdev(rdev->bdev);
Christoph Hellwigd7a47832021-02-01 14:17:20 +01005834 if (mddev->ro != 1 && rdev_read_only(rdev)) {
NeilBrown97b20ef2017-04-13 08:53:48 +10005835 mddev->ro = 1;
5836 if (mddev->gendisk)
5837 set_disk_ro(mddev->gendisk, 1);
5838 }
NeilBrownf0d76d72007-07-17 04:06:12 -07005839
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005840 if (rdev->sb_page)
5841 mddev->has_superblocks = true;
5842
NeilBrownf0d76d72007-07-17 04:06:12 -07005843 /* perform some consistency tests on the device.
5844	 * We don't want the data to overlap the metadata;
Andre Noll58c0fed2009-03-31 14:33:13 +11005845	 * internal bitmap issues have been handled elsewhere.
NeilBrownf0d76d72007-07-17 04:06:12 -07005846 */
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +11005847 if (rdev->meta_bdev) {
5848 /* Nothing to check */;
5849 } else if (rdev->data_offset < rdev->sb_start) {
Andre Noll58c0fed2009-03-31 14:33:13 +11005850 if (mddev->dev_sectors &&
5851 rdev->data_offset + mddev->dev_sectors
Andre Noll0f420352008-07-11 22:02:23 +10005852 > rdev->sb_start) {
NeilBrown9d487392016-11-02 14:16:49 +11005853 pr_warn("md: %s: data overlaps metadata\n",
5854 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005855 return -EINVAL;
5856 }
5857 } else {
Andre Noll0f420352008-07-11 22:02:23 +10005858 if (rdev->sb_start + rdev->sb_size/512
NeilBrownf0d76d72007-07-17 04:06:12 -07005859 > rdev->data_offset) {
NeilBrown9d487392016-11-02 14:16:49 +11005860 pr_warn("md: %s: metadata overlaps data\n",
5861 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005862 return -EINVAL;
5863 }
5864 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005865 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005866 }
5867
Kent Overstreetafeee512018-05-20 18:25:52 -04005868 if (!bioset_initialized(&mddev->bio_set)) {
5869 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5870 if (err)
5871 return err;
Ming Lei10273172017-02-14 23:29:00 +08005872 }
Kent Overstreetafeee512018-05-20 18:25:52 -04005873 if (!bioset_initialized(&mddev->sync_set)) {
5874 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5875 if (err)
Guoqing Jiang10764812021-05-25 17:46:17 +08005876 goto exit_bio_set;
5877 }
Guoqing Jiangdaee2022021-06-03 17:21:06 +08005878 if (mddev->level != 1 && mddev->level != 10 &&
5879 !bioset_initialized(&mddev->io_acct_set)) {
Guoqing Jiang10764812021-05-25 17:46:17 +08005880 err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
5881 offsetof(struct md_io_acct, bio_clone), 0);
5882 if (err)
5883 goto exit_sync_set;
NeilBrown5a850712017-06-21 09:12:21 +10005884 }
NeilBrowna167f662010-10-26 18:31:13 +11005885
Linus Torvalds1da177e2005-04-16 15:20:36 -07005886 spin_lock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005887 pers = find_pers(mddev->level, mddev->clevel);
NeilBrown2604b702006-01-06 00:20:36 -08005888 if (!pers || !try_module_get(pers->owner)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005889 spin_unlock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005890 if (mddev->level != LEVEL_NONE)
NeilBrown9d487392016-11-02 14:16:49 +11005891 pr_warn("md: personality for level %d is not loaded!\n",
5892 mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08005893 else
NeilBrown9d487392016-11-02 14:16:49 +11005894 pr_warn("md: personality for level %s is not loaded!\n",
5895 mddev->clevel);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005896 err = -EINVAL;
5897 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005898 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899 spin_unlock(&pers_lock);
NeilBrown34817e82009-03-31 14:39:38 +11005900 if (mddev->level != pers->level) {
5901 mddev->level = pers->level;
5902 mddev->new_level = pers->level;
5903 }
NeilBrownd9d166c2006-01-06 00:20:51 -08005904 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905
NeilBrownf6705572006-03-27 01:18:11 -08005906 if (mddev->reshape_position != MaxSector &&
NeilBrown63c70c42006-03-27 01:18:13 -08005907 pers->start_reshape == NULL) {
NeilBrownf6705572006-03-27 01:18:11 -08005908 /* This personality cannot handle reshaping... */
NeilBrownf6705572006-03-27 01:18:11 -08005909 module_put(pers->owner);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005910 err = -EINVAL;
5911 goto abort;
NeilBrownf6705572006-03-27 01:18:11 -08005912 }
5913
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005914 if (pers->sync_request) {
5915 /* Warn if this is a potentially silly
5916 * configuration.
5917 */
5918 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11005919 struct md_rdev *rdev2;
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005920 int warned = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005921
NeilBrowndafb20f2012-03-19 12:46:39 +11005922 rdev_for_each(rdev, mddev)
5923 rdev_for_each(rdev2, mddev) {
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005924 if (rdev < rdev2 &&
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02005925 rdev->bdev->bd_disk ==
5926 rdev2->bdev->bd_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11005927 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5928 mdname(mddev),
5929 bdevname(rdev->bdev,b),
5930 bdevname(rdev2->bdev,b2));
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005931 warned = 1;
5932 }
5933 }
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005934
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005935 if (warned)
NeilBrown9d487392016-11-02 14:16:49 +11005936 pr_warn("True protection against single-disk failure might be compromised.\n");
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005937 }
5938
NeilBrown657390d2005-08-26 18:34:16 -07005939 mddev->recovery = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11005940 /* may be over-ridden by personality */
5941 mddev->resync_max_sectors = mddev->dev_sectors;
5942
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005943 mddev->ok_start_degraded = start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005944
NeilBrown0f9552b52009-12-30 12:08:50 +11005945 if (start_readonly && mddev->ro == 0)
NeilBrownf91de922005-11-08 21:39:36 -08005946 mddev->ro = 2; /* read-only, but switch on first write */
5947
NeilBrown36d091f2014-12-15 12:56:58 +11005948 err = pers->run(mddev);
Andre Noll13e53df2008-03-26 00:07:03 +01005949 if (err)
NeilBrown9d487392016-11-02 14:16:49 +11005950 pr_warn("md: pers->run() failed ...\n");
NeilBrown36d091f2014-12-15 12:56:58 +11005951 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
NeilBrown9d487392016-11-02 14:16:49 +11005952 WARN_ONCE(!mddev->external_size,
5953 "%s: default size too small, but 'external_size' not in effect?\n",
5954 __func__);
5955 pr_warn("md: invalid array_size %llu > default size %llu\n",
5956 (unsigned long long)mddev->array_sectors / 2,
5957 (unsigned long long)pers->size(mddev, 0, 0) / 2);
Dan Williamsb522adc2009-03-31 15:00:31 +11005958 err = -EINVAL;
Dan Williamsb522adc2009-03-31 15:00:31 +11005959 }
NeilBrown36d091f2014-12-15 12:56:58 +11005960 if (err == 0 && pers->sync_request &&
NeilBrownef99bf42012-05-22 13:55:08 +10005961 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005962 struct bitmap *bitmap;
5963
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07005964 bitmap = md_bitmap_create(mddev, -1);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005965 if (IS_ERR(bitmap)) {
5966 err = PTR_ERR(bitmap);
NeilBrown9d487392016-11-02 14:16:49 +11005967 pr_warn("%s: failed to create bitmap (%d)\n",
5968 mdname(mddev), err);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005969 } else
5970 mddev->bitmap = bitmap;
5971
NeilBrownb15c2e52006-01-06 00:20:16 -08005972 }
Guoqing Jiangd4945492019-06-14 17:10:39 +08005973 if (err)
5974 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005975
5976 if (mddev->bitmap_info.max_write_behind > 0) {
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005977 bool create_pool = false;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005978
5979 rdev_for_each(rdev, mddev) {
5980 if (test_bit(WriteMostly, &rdev->flags) &&
Guoqing Jiang404659c2019-12-23 10:48:53 +01005981 rdev_init_serial(rdev))
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005982 create_pool = true;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005983 }
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005984 if (create_pool && mddev->serial_info_pool == NULL) {
Guoqing Jiang404659c2019-12-23 10:48:53 +01005985 mddev->serial_info_pool =
5986 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
5987 sizeof(struct serial_info));
5988 if (!mddev->serial_info_pool) {
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005989 err = -ENOMEM;
Guoqing Jiangd4945492019-06-14 17:10:39 +08005990 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005991 }
5992 }
5993 }
5994
NeilBrown5c675f82014-12-15 12:56:56 +11005995 if (mddev->queue) {
Shaohua Libb086a82016-09-30 09:45:40 -07005996 bool nonrot = true;
5997
5998 rdev_for_each(rdev, mddev) {
5999 if (rdev->raid_disk >= 0 &&
6000 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6001 nonrot = false;
6002 break;
6003 }
6004 }
6005 if (mddev->degraded)
6006 nonrot = false;
6007 if (nonrot)
Bart Van Assche8b904b52018-03-07 17:10:10 -08006008 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
Shaohua Libb086a82016-09-30 09:45:40 -07006009 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08006010 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
Guoqing Jiang10764812021-05-25 17:46:17 +08006011 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
NeilBrown5c675f82014-12-15 12:56:56 +11006012 }
NeilBrown36d091f2014-12-15 12:56:58 +11006013 if (pers->sync_request) {
NeilBrown00bcb4a2010-06-01 19:37:23 +10006014 if (mddev->kobj.sd &&
6015 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11006016 pr_warn("md: cannot register extra attributes for %s\n",
6017 mdname(mddev));
NeilBrown00bcb4a2010-06-01 19:37:23 +10006018 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07006019 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6020 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrown5e55e2f2007-03-26 21:32:14 -08006021 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
NeilBrownfd9d49c2005-11-08 21:39:42 -08006022 mddev->ro = 0;
6023
Robert Becker1e509152009-12-14 12:49:58 +11006024 atomic_set(&mddev->max_corr_read_errors,
6025 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006026 mddev->safemode = 0;
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006027 if (mddev_is_clustered(mddev))
6028 mddev->safemode_delay = 0;
6029 else
Zhao Heming7c9d5c52020-07-21 02:08:52 +08006030 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006031 mddev->in_sync = 1;
NeilBrown0ca69882011-01-14 09:14:33 +11006032 smp_wmb();
NeilBrown36d091f2014-12-15 12:56:58 +11006033 spin_lock(&mddev->lock);
6034 mddev->pers = pers;
NeilBrown36d091f2014-12-15 12:56:58 +11006035 spin_unlock(&mddev->lock);
NeilBrowndafb20f2012-03-19 12:46:39 +11006036 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006037 if (rdev->raid_disk >= 0)
Yufen Yue5b521e2019-06-14 15:41:07 -07006038 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006039
NeilBrowna4a3d262015-07-17 11:57:30 +10006040 if (mddev->degraded && !mddev->ro)
6041 /* This ensures that recovering status is reported immediately
6042 * via sysfs - until a lack of spares is confirmed.
6043 */
6044 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006045 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf72ffdd2014-09-30 14:23:59 +10006046
Shaohua Li29530792016-12-08 15:48:19 -08006047 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07006048 md_update_sb(mddev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006049
Guoqing Jiang54679482021-10-04 23:34:53 +08006050 md_new_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006051 return 0;
Xiao Nib1261942018-01-24 12:17:38 +08006052
Guoqing Jiangd4945492019-06-14 17:10:39 +08006053bitmap_abort:
6054 mddev_detach(mddev);
6055 if (mddev->private)
6056 pers->free(mddev, mddev->private);
6057 mddev->private = NULL;
6058 module_put(pers->owner);
6059 md_bitmap_destroy(mddev);
Xiao Nib1261942018-01-24 12:17:38 +08006060abort:
Guoqing Jiangdaee2022021-06-03 17:21:06 +08006061 if (mddev->level != 1 && mddev->level != 10)
6062 bioset_exit(&mddev->io_acct_set);
Guoqing Jiang10764812021-05-25 17:46:17 +08006063exit_sync_set:
NeilBrown4bc034d2019-03-29 10:46:16 -07006064 bioset_exit(&mddev->sync_set);
Guoqing Jiang10764812021-05-25 17:46:17 +08006065exit_bio_set:
6066 bioset_exit(&mddev->bio_set);
Xiao Nib1261942018-01-24 12:17:38 +08006067 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006068}
NeilBrown390ee602010-06-01 19:37:27 +10006069EXPORT_SYMBOL_GPL(md_run);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006070
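/*
 * do_md_run() - md_run() plus the user-visible follow-up: load the
 * bitmap, publish the new capacity, and notify sysfs/udev.  The
 * MD_NOT_READY flag is held across the sequence so the array is not
 * reported as active before it is actually usable.
 */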
int do_md_run(struct mddev *mddev)
{
	int err;

	set_bit(MD_NOT_READY, &mddev->flags);
	err = md_run(mddev);
	if (err)
		goto out;
	err = md_bitmap_load(mddev);
	if (err) {
		md_bitmap_destroy(mddev);
		goto out;
	}

	if (mddev_is_clustered(mddev))
		md_allow_write(mddev);

	/* run start-up tasks that require md_thread */
	md_start(mddev);

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
	clear_bit(MD_NOT_READY, &mddev->flags);
	mddev->changed = 1;
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	sysfs_notify_dirent_safe(mddev->sysfs_degraded);
out:
	clear_bit(MD_NOT_READY, &mddev->flags);
	return err;
}

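/*
 * md_start() - invoke the personality's optional ->start() method.
 * MD_RECOVERY_WAIT keeps the sync thread parked until ->start()
 * has completed.
 */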
int md_start(struct mddev *mddev)
{
	int ret = 0;

	if (mddev->pers->start) {
		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		ret = mddev->pers->start(mddev);
		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
		md_wakeup_thread(mddev->sync_thread);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(md_start);

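/*
 * restart_array() - switch a read-only array back to read-write,
 * refusing if a required journal device is missing/faulty or if any
 * member device is itself read-only, then kick recovery/resync.
 */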
static int restart_array(struct mddev *mddev)
{
	struct gendisk *disk = mddev->gendisk;
	struct md_rdev *rdev;
	bool has_journal = false;
	bool has_readonly = false;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		if (test_bit(Journal, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			has_journal = true;
		if (rdev_read_only(rdev))
			has_readonly = true;
	}
	rcu_read_unlock();
	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
		/* Don't restart rw with journal missing/faulty */
		return -EINVAL;
	if (has_readonly)
		return -EROFS;

	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

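/*
 * md_clean() - reset an mddev to its pristine state once the array
 * has been fully stopped, so the device node can be reused.
 */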
static void md_clean(struct mddev *mddev)
{
	mddev->array_sectors = 0;
	mddev->external_size = 0;
	mddev->dev_sectors = 0;
	mddev->raid_disks = 0;
	mddev->recovery_cp = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->reshape_position = MaxSector;
	mddev->external = 0;
	mddev->persistent = 0;
	mddev->level = LEVEL_NONE;
	mddev->clevel[0] = 0;
	mddev->flags = 0;
	mddev->sb_flags = 0;
	mddev->ro = 0;
	mddev->metadata_type[0] = 0;
	mddev->chunk_sectors = 0;
	mddev->ctime = mddev->utime = 0;
	mddev->layout = 0;
	mddev->max_disks = 0;
	mddev->events = 0;
	mddev->can_decrease_events = 0;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = LEVEL_NONE;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 0;
	mddev->curr_resync = 0;
	atomic64_set(&mddev->resync_mismatches, 0);
	mddev->suspend_lo = mddev->suspend_hi = 0;
	mddev->sync_speed_min = mddev->sync_speed_max = 0;
	mddev->recovery = 0;
	mddev->in_sync = 0;
	mddev->changed = 0;
	mddev->degraded = 0;
	mddev->safemode = 0;
	mddev->private = NULL;
	mddev->cluster_info = NULL;
	mddev->bitmap_info.offset = 0;
	mddev->bitmap_info.default_offset = 0;
	mddev->bitmap_info.default_space = 0;
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
	mddev->bitmap_info.nodes = 0;
}

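/*
 * __md_stop_writes() - quiesce all write-out: stop the sync thread,
 * flush the bitmap, and mark the superblock clean when nothing is
 * pending.  Callers are expected to hold the mddev lock; see the
 * md_stop_writes() wrapper below.
 */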
static void __md_stop_writes(struct mddev *mddev)
{
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (work_pending(&mddev->del_work))
		flush_workqueue(md_misc_wq);
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_reap_sync_thread(mddev);
	}

	del_timer_sync(&mddev->safemode_timer);

	if (mddev->pers && mddev->pers->quiesce) {
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	md_bitmap_flush(mddev);

	if (mddev->ro == 0 &&
	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
	     mddev->sb_flags)) {
		/* mark array as shutdown cleanly */
		if (!mddev_is_clustered(mddev))
			mddev->in_sync = 1;
		md_update_sb(mddev, 1);
	}
	/* disable policy to guarantee rdevs free resources for serialization */
	mddev->serialize_policy = 0;
	mddev_destroy_serial_pool(mddev, NULL, true);
}

void md_stop_writes(struct mddev *mddev)
{
	mddev_lock_nointr(mddev);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);

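/*
 * mddev_detach() - wait for behind-writes, cycle the personality
 * through quiesce to drain pending I/O, and stop the array's main
 * thread.
 */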
static void mddev_detach(struct mddev *mddev)
{
	md_bitmap_wait_behind_writes(mddev);
	if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
}

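/*
 * __md_stop() - detach and free the personality.  Afterwards the
 * mddev has no ->pers and only generic md resources remain.
 */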
static void __md_stop(struct mddev *mddev)
{
	struct md_personality *pers = mddev->pers;
	md_bitmap_destroy(mddev);
	mddev_detach(mddev);
	/* Ensure ->event_work is done */
	if (mddev->event_work.func)
		flush_workqueue(md_misc_wq);
	spin_lock(&mddev->lock);
	mddev->pers = NULL;
	spin_unlock(&mddev->lock);
	if (mddev->private)
		pers->free(mddev, mddev->private);
	mddev->private = NULL;
	if (pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
	module_put(pers->owner);
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}

void md_stop(struct mddev *mddev)
{
	/* Stop the array and free any attached data structures.
	 * This is called from dm-raid.
	 */
	__md_stop(mddev);
	bioset_exit(&mddev->bio_set);
	bioset_exit(&mddev->sync_set);
	if (mddev->level != 1 && mddev->level != 10)
		bioset_exit(&mddev->io_acct_set);
}

EXPORT_SYMBOL_GPL(md_stop);

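/*
 * md_set_readonly() - freeze recovery, wait for it to finish, and
 * flip a running array (and its gendisk) to read-only.  Fails with
 * -EBUSY if the device is still open by anyone other than @bdev or
 * recovery is still running.
 */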
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
	int err = 0;
	int did_freeze = 0;

	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
		did_freeze = 1;
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	if (mddev->sync_thread)
		/* Thread might be blocked waiting for metadata update
		 * which will now never happen */
		wake_up_process(mddev->sync_thread->tsk);

	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
		return -EBUSY;
	mddev_unlock(mddev);
	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
					  &mddev->recovery));
	wait_event(mddev->sb_wait,
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	mddev_lock_nointr(mddev);

	mutex_lock(&mddev->open_mutex);
	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
	    mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
		pr_warn("md: %s still in use.\n",mdname(mddev));
		if (did_freeze) {
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		}
		err = -EBUSY;
		goto out;
	}
	if (mddev->pers) {
		__md_stop_writes(mddev);

		err = -ENXIO;
		if (mddev->ro==1)
			goto out;
		mddev->ro = 1;
		set_disk_ro(mddev->gendisk, 1);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		err = 0;
	}
out:
	mutex_unlock(&mddev->open_mutex);
	return err;
}

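/*
 * Note: do_md_stop() temporarily drops and re-takes the mddev lock
 * while waiting for the sync thread to exit, so callers hold the
 * lock but may see it released in the middle.
 */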
/* mode:
 *   0 - completely stop and dis-assemble array
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(struct mddev *mddev, int mode,
		      struct block_device *bdev)
{
	struct gendisk *disk = mddev->gendisk;
	struct md_rdev *rdev;
	int did_freeze = 0;

	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
		did_freeze = 1;
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	if (mddev->sync_thread)
		/* Thread might be blocked waiting for metadata update
		 * which will now never happen */
		wake_up_process(mddev->sync_thread->tsk);

	mddev_unlock(mddev);
	wait_event(resync_wait, (mddev->sync_thread == NULL &&
				 !test_bit(MD_RECOVERY_RUNNING,
					   &mddev->recovery)));
	mddev_lock_nointr(mddev);

	mutex_lock(&mddev->open_mutex);
	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
	    mddev->sysfs_active ||
	    mddev->sync_thread ||
	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
		pr_warn("md: %s still in use.\n",mdname(mddev));
		mutex_unlock(&mddev->open_mutex);
		if (did_freeze) {
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_wakeup_thread(mddev->thread);
		}
		return -EBUSY;
	}
	if (mddev->pers) {
		if (mddev->ro)
			set_disk_ro(disk, 0);

		__md_stop_writes(mddev);
		__md_stop(mddev);

		/* tell userspace to handle 'inactive' */
		sysfs_notify_dirent_safe(mddev->sysfs_state);

		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0)
				sysfs_unlink_rdev(mddev, rdev);

		set_capacity_and_notify(disk, 0);
		mutex_unlock(&mddev->open_mutex);
		mddev->changed = 1;

		if (mddev->ro)
			mddev->ro = 0;
	} else
		mutex_unlock(&mddev->open_mutex);
	/*
	 * Free resources if final stop
	 */
	if (mode == 0) {
		pr_info("md: %s stopped.\n", mdname(mddev));

		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			spin_lock(&mddev->lock);
			mddev->bitmap_info.file = NULL;
			spin_unlock(&mddev->lock);
			fput(f);
		}
		mddev->bitmap_info.offset = 0;

		export_array(mddev);

		md_clean(mddev);
		if (mddev->hold_active == UNTIL_STOP)
			mddev->hold_active = 0;
	}
	md_new_event();
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

#ifndef MODULE
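/*
 * autorun_array() - try to start an array assembled from
 * autodetected devices; if do_md_run() fails the array is stopped
 * and disassembled again.
 */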
static void autorun_array(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int err;

	if (list_empty(&mddev->disks))
		return;

	pr_info("md: running: ");

	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];
		pr_cont("<%s>", bdevname(rdev->bdev,b));
	}
	pr_cont("\n");

	err = do_md_run(mddev);
	if (err) {
		pr_warn("md: do_md_run() returned %d\n", err);
		do_md_stop(mddev, 0, NULL);
	}
}

/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
	struct md_rdev *rdev0, *rdev, *tmp;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	pr_info("md: autorun ...\n");
	while (!list_empty(&pending_raid_disks)) {
		int unit;
		dev_t dev;
		LIST_HEAD(candidates);
		rdev0 = list_entry(pending_raid_disks.next,
				   struct md_rdev, same_set);

		pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
		INIT_LIST_HEAD(&candidates);
		rdev_for_each_list(rdev, tmp, &pending_raid_disks)
			if (super_90_load(rdev, rdev0, 0) >= 0) {
				pr_debug("md: adding %s ...\n",
					 bdevname(rdev->bdev,b));
				list_move(&rdev->same_set, &candidates);
			}
		/*
		 * now we have a set of devices, with all of them having
		 * mostly sane superblocks. It's time to allocate the
		 * mddev.
		 */
		if (part) {
			dev = MKDEV(mdp_major,
				    rdev0->preferred_minor << MdpMinorShift);
			unit = MINOR(dev) >> MdpMinorShift;
		} else {
			dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
			unit = MINOR(dev);
		}
		if (rdev0->preferred_minor != unit) {
			pr_warn("md: unit number in %s is bad: %d\n",
				bdevname(rdev0->bdev, b), rdev0->preferred_minor);
			break;
		}

		md_probe(dev);
		mddev = mddev_find(dev);
		if (!mddev)
			break;

		if (mddev_lock(mddev))
			pr_warn("md: %s locked, cannot run\n", mdname(mddev));
		else if (mddev->raid_disks || mddev->major_version
			 || !list_empty(&mddev->disks)) {
			pr_warn("md: %s already running, cannot run %s\n",
				mdname(mddev), bdevname(rdev0->bdev,b));
			mddev_unlock(mddev);
		} else {
			pr_debug("md: created %s\n", mdname(mddev));
			mddev->persistent = 1;
			rdev_for_each_list(rdev, tmp, &candidates) {
				list_del_init(&rdev->same_set);
				if (bind_rdev_to_array(rdev, mddev))
					export_rdev(rdev);
			}
			autorun_array(mddev);
			mddev_unlock(mddev);
		}
		/* on success, candidates will be empty, on error
		 * it won't...
		 */
		rdev_for_each_list(rdev, tmp, &candidates) {
			list_del_init(&rdev->same_set);
			export_rdev(rdev);
		}
		mddev_put(mddev);
	}
	pr_info("md: ... autorun DONE.\n");
}
#endif /* !MODULE */

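/* ioctl helper: report the driver's md_u.h version triple to userspace. */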
static int get_version(void __user *arg)
{
	mdu_version_t ver;

	ver.major = MD_MAJOR_VERSION;
	ver.minor = MD_MINOR_VERSION;
	ver.patchlevel = MD_PATCHLEVEL_VERSION;

	if (copy_to_user(arg, &ver, sizeof(ver)))
		return -EFAULT;

	return 0;
}

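/*
 * ioctl helper: fill an mdu_array_info_t while walking the rdevs
 * under RCU.  ctime/utime are clamped to 32 bits and a size that
 * overflows the ioctl field is reported as -1.
 */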
static int get_array_info(struct mddev *mddev, void __user *arg)
{
	mdu_array_info_t info;
	int nr,working,insync,failed,spare;
	struct md_rdev *rdev;

	nr = working = insync = failed = spare = 0;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		nr++;
		if (test_bit(Faulty, &rdev->flags))
			failed++;
		else {
			working++;
			if (test_bit(In_sync, &rdev->flags))
				insync++;
			else if (test_bit(Journal, &rdev->flags))
				/* TODO: add journal count to md_u.h */
				;
			else
				spare++;
		}
	}
	rcu_read_unlock();

	info.major_version = mddev->major_version;
	info.minor_version = mddev->minor_version;
	info.patch_version = MD_PATCHLEVEL_VERSION;
	info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
	info.level = mddev->level;
	info.size = mddev->dev_sectors / 2;
	if (info.size != mddev->dev_sectors / 2) /* overflow */
		info.size = -1;
	info.nr_disks = nr;
	info.raid_disks = mddev->raid_disks;
	info.md_minor = mddev->md_minor;
	info.not_persistent= !mddev->persistent;

	info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
	info.state = 0;
	if (mddev->in_sync)
		info.state = (1<<MD_SB_CLEAN);
	if (mddev->bitmap && mddev->bitmap_info.offset)
		info.state |= (1<<MD_SB_BITMAP_PRESENT);
	if (mddev_is_clustered(mddev))
		info.state |= (1<<MD_SB_CLUSTERED);
	info.active_disks = insync;
	info.working_disks = working;
	info.failed_disks = failed;
	info.spare_disks = spare;

	info.layout = mddev->layout;
	info.chunk_size = mddev->chunk_sectors << 9;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

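/*
 * ioctl helper: copy the path of the external bitmap file, if one
 * is in use, out to userspace.
 */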
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
	mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
	char *ptr;
	int err;

	file = kzalloc(sizeof(*file), GFP_NOIO);
	if (!file)
		return -ENOMEM;

	err = 0;
	spin_lock(&mddev->lock);
	/* bitmap enabled */
	if (mddev->bitmap_info.file) {
		ptr = file_path(mddev->bitmap_info.file, file->pathname,
				sizeof(file->pathname));
		if (IS_ERR(ptr))
			err = PTR_ERR(ptr);
		else
			memmove(file->pathname, ptr,
				sizeof(file->pathname)-(ptr-file->pathname));
	}
	spin_unlock(&mddev->lock);

	if (err == 0 &&
	    copy_to_user(arg, file, sizeof(*file)))
		err = -EFAULT;

	kfree(file);
	return err;
}

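/*
 * ioctl helper: look up a member device by number and report its
 * dev_t and state flags; an empty slot is reported as MD_DISK_REMOVED.
 */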
static int get_disk_info(struct mddev *mddev, void __user * arg)
{
	mdu_disk_info_t info;
	struct md_rdev *rdev;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	rcu_read_lock();
	rdev = md_find_rdev_nr_rcu(mddev, info.number);
	if (rdev) {
		info.major = MAJOR(rdev->bdev->bd_dev);
		info.minor = MINOR(rdev->bdev->bd_dev);
		info.raid_disk = rdev->raid_disk;
		info.state = 0;
		if (test_bit(Faulty, &rdev->flags))
			info.state |= (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev->flags)) {
			info.state |= (1<<MD_DISK_ACTIVE);
			info.state |= (1<<MD_DISK_SYNC);
		}
		if (test_bit(Journal, &rdev->flags))
			info.state |= (1<<MD_DISK_JOURNAL);
		if (test_bit(WriteMostly, &rdev->flags))
			info.state |= (1<<MD_DISK_WRITEMOSTLY);
		if (test_bit(FailFast, &rdev->flags))
			info.state |= (1<<MD_DISK_FAILFAST);
	} else {
		info.major = info.minor = 0;
		info.raid_disk = -1;
		info.state = (1<<MD_DISK_REMOVED);
	}
	rcu_read_unlock();

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

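/*
 * md_add_new_disk() - the ADD_NEW_DISK work horse.  Three cases:
 * an unassembled array collecting superblocks, a running array
 * accepting a hot spare (or journal/cluster device), and the legacy
 * path that builds version-0 arrays one device at a time.
 */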
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct md_rdev *rdev;
	dev_t dev = MKDEV(info->major,info->minor);

	if (mddev_is_clustered(mddev) &&
	    !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
		pr_warn("%s: Cannot add to clustered mddev.\n",
			mdname(mddev));
		return -EINVAL;
	}

	if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
		return -EOVERFLOW;

	if (!mddev->raid_disks) {
		int err;
		/* expecting a device which has a superblock */
		rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
		if (IS_ERR(rdev)) {
			pr_warn("md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		if (!list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0) {
				pr_warn("md: %s has different UUID to %s\n",
					bdevname(rdev->bdev,b),
					bdevname(rdev0->bdev,b2));
				export_rdev(rdev);
				return -EINVAL;
			}
		}
		err = bind_rdev_to_array(rdev, mddev);
		if (err)
			export_rdev(rdev);
		return err;
	}

	/*
	 * md_add_new_disk can be used once the array is assembled
	 * to add "hot spares". They must already have a superblock
	 * written
	 */
	if (mddev->pers) {
		int err;
		if (!mddev->pers->hot_add_disk) {
			pr_warn("%s: personality does not support diskops!\n",
				mdname(mddev));
			return -EINVAL;
		}
		if (mddev->persistent)
			rdev = md_import_device(dev, mddev->major_version,
						mddev->minor_version);
		else
			rdev = md_import_device(dev, -1, -1);
		if (IS_ERR(rdev)) {
			pr_warn("md: md_import_device returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		/* set saved_raid_disk if appropriate */
		if (!mddev->persistent) {
			if (info->state & (1<<MD_DISK_SYNC) &&
			    info->raid_disk < mddev->raid_disks) {
				rdev->raid_disk = info->raid_disk;
				set_bit(In_sync, &rdev->flags);
				clear_bit(Bitmap_sync, &rdev->flags);
			} else
				rdev->raid_disk = -1;
			rdev->saved_raid_disk = rdev->raid_disk;
		} else
			super_types[mddev->major_version].
				validate_super(mddev, rdev);
		if ((info->state & (1<<MD_DISK_SYNC)) &&
		     rdev->raid_disk != info->raid_disk) {
			/* This was a hot-add request, but the events
			 * don't match, so reject it.
			 */
			export_rdev(rdev);
			return -EINVAL;
		}

		clear_bit(In_sync, &rdev->flags); /* just to be sure */
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);
		if (info->state & (1<<MD_DISK_FAILFAST))
			set_bit(FailFast, &rdev->flags);
		else
			clear_bit(FailFast, &rdev->flags);

		if (info->state & (1<<MD_DISK_JOURNAL)) {
			struct md_rdev *rdev2;
			bool has_journal = false;

			/* make sure no existing journal disk */
			rdev_for_each(rdev2, mddev) {
				if (test_bit(Journal, &rdev2->flags)) {
					has_journal = true;
					break;
				}
			}
			if (has_journal || mddev->bitmap) {
				export_rdev(rdev);
				return -EBUSY;
			}
			set_bit(Journal, &rdev->flags);
		}
		/*
		 * check whether the device shows up in other nodes
		 */
		if (mddev_is_clustered(mddev)) {
			if (info->state & (1 << MD_DISK_CANDIDATE))
				set_bit(Candidate, &rdev->flags);
			else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
				/* --add initiated by this node */
				err = md_cluster_ops->add_new_disk(mddev, rdev);
				if (err) {
					export_rdev(rdev);
					return err;
				}
			}
		}

		rdev->raid_disk = -1;
		err = bind_rdev_to_array(rdev, mddev);

		if (err)
			export_rdev(rdev);

		if (mddev_is_clustered(mddev)) {
			if (info->state & (1 << MD_DISK_CANDIDATE)) {
				if (!err) {
					err = md_cluster_ops->new_disk_ack(mddev,
						err == 0);
					if (err)
						md_kick_rdev_from_array(rdev);
				}
			} else {
				if (err)
					md_cluster_ops->add_new_disk_cancel(mddev);
				else
					err = add_bound_rdev(rdev);
			}

		} else if (!err)
			err = add_bound_rdev(rdev);

		return err;
	}

	/* otherwise, md_add_new_disk is only allowed
	 * for major_version==0 superblocks
	 */
	if (mddev->major_version != 0) {
		pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
		return -EINVAL;
	}

	if (!(info->state & (1<<MD_DISK_FAULTY))) {
		int err;
		rdev = md_import_device(dev, -1, 0);
		if (IS_ERR(rdev)) {
			pr_warn("md: error, md_import_device() returned %ld\n",
				PTR_ERR(rdev));
			return PTR_ERR(rdev);
		}
		rdev->desc_nr = info->number;
		if (info->raid_disk < mddev->raid_disks)
			rdev->raid_disk = info->raid_disk;
		else
			rdev->raid_disk = -1;

		if (rdev->raid_disk < mddev->raid_disks)
			if (info->state & (1<<MD_DISK_SYNC))
				set_bit(In_sync, &rdev->flags);

		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		if (info->state & (1<<MD_DISK_FAILFAST))
			set_bit(FailFast, &rdev->flags);

		if (!mddev->persistent) {
			pr_debug("md: nonpersistent superblock ...\n");
			rdev->sb_start = bdev_nr_sectors(rdev->bdev);
		} else
			rdev->sb_start = calc_dev_sboffset(rdev);
		rdev->sectors = rdev->sb_start;

		err = bind_rdev_to_array(rdev, mddev);
		if (err) {
			export_rdev(rdev);
			return err;
		}
	}

	return 0;
}

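/*
 * hot_remove_disk() - detach @dev from a running array.  The rdev
 * is only kicked once it no longer occupies an active raid_disk
 * slot; otherwise -EBUSY is returned.
 */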
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
	char b[BDEVNAME_SIZE];
	struct md_rdev *rdev;

	if (!mddev->pers)
		return -ENODEV;

	rdev = find_rdev(mddev, dev);
	if (!rdev)
		return -ENXIO;

	if (rdev->raid_disk < 0)
		goto kick_rdev;

	clear_bit(Blocked, &rdev->flags);
	remove_and_add_spares(mddev, rdev);

	if (rdev->raid_disk >= 0)
		goto busy;

kick_rdev:
	if (mddev_is_clustered(mddev)) {
		if (md_cluster_ops->remove_disk(mddev, rdev))
			goto busy;
	}

	md_kick_rdev_from_array(rdev);
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	if (mddev->thread)
		md_wakeup_thread(mddev->thread);
	else
		md_update_sb(mddev, 1);
	md_new_event();

	return 0;
busy:
	pr_debug("md: cannot remove active disk %s from %s ...\n",
		 bdevname(rdev->bdev,b), mdname(mddev));
	return -EBUSY;
}

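/*
 * hot_add_disk() - add @dev as a spare to a running version-0
 * array, then kick recovery in case it should be rebuilt onto
 * immediately.
 */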
NeilBrownf72ffdd2014-09-30 14:23:59 +10006954static int hot_add_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006955{
6956 char b[BDEVNAME_SIZE];
6957 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11006958 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006959
6960 if (!mddev->pers)
6961 return -ENODEV;
6962
6963 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006964 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006965 mdname(mddev));
6966 return -EINVAL;
6967 }
6968 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006969 pr_warn("%s: personality does not support diskops!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006970 mdname(mddev));
6971 return -EINVAL;
6972 }
6973
NeilBrownd710e132008-10-13 11:55:12 +11006974 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006975 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006976 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006977 PTR_ERR(rdev));
6978 return -EINVAL;
6979 }
6980
6981 if (mddev->persistent)
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006982 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006983 else
Christoph Hellwig0fe80342021-10-18 12:11:06 +02006984 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006985
NeilBrown8190e752009-06-18 08:48:58 +10006986 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006987
NeilBrownb2d444d2005-11-08 21:39:31 -08006988 if (test_bit(Faulty, &rdev->flags)) {
NeilBrown9d487392016-11-02 14:16:49 +11006989 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006990 bdevname(rdev->bdev,b), mdname(mddev));
6991 err = -EINVAL;
6992 goto abort_export;
6993 }
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006994
NeilBrownb2d444d2005-11-08 21:39:31 -08006995 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006996 rdev->desc_nr = -1;
NeilBrown58427302006-10-06 00:44:04 -07006997 rdev->saved_raid_disk = -1;
NeilBrown2bf071b2006-01-06 00:20:55 -08006998 err = bind_rdev_to_array(rdev, mddev);
6999 if (err)
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05007000 goto abort_export;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007001
7002 /*
7003 * The rest had better be atomic, as we can have disk failures
7004 * noticed in interrupt contexts ...
7005 */
7006
Linus Torvalds1da177e2005-04-16 15:20:36 -07007007 rdev->raid_disk = -1;
7008
Shaohua Li29530792016-12-08 15:48:19 -08007009 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11007010 if (!mddev->thread)
7011 md_update_sb(mddev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007012 /*
7013 * Kick recovery, maybe this spare has to be added to the
7014 * array immediately.
7015 */
7016 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7017 md_wakeup_thread(mddev->thread);
Guoqing Jiang54679482021-10-04 23:34:53 +08007018 md_new_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007019 return 0;
7020
Linus Torvalds1da177e2005-04-16 15:20:36 -07007021abort_export:
7022 export_rdev(rdev);
7023 return err;
7024}
7025
NeilBrownfd01b882011-10-11 16:47:53 +11007026static int set_bitmap_file(struct mddev *mddev, int fd)
NeilBrown32a76272005-06-21 17:17:14 -07007027{
NeilBrown035328c2014-04-09 12:25:40 +10007028 int err = 0;
NeilBrown32a76272005-06-21 17:17:14 -07007029
NeilBrown36fa3062005-09-09 16:23:45 -07007030 if (mddev->pers) {
NeilBrownd66b1b32014-08-08 15:40:24 +10007031 if (!mddev->pers->quiesce || !mddev->thread)
NeilBrown36fa3062005-09-09 16:23:45 -07007032 return -EBUSY;
7033 if (mddev->recovery || mddev->sync_thread)
7034 return -EBUSY;
7035 /* we should be able to change the bitmap.. */
NeilBrown32a76272005-06-21 17:17:14 -07007036 }
7037
NeilBrown36fa3062005-09-09 16:23:45 -07007038 if (fd >= 0) {
NeilBrown035328c2014-04-09 12:25:40 +10007039 struct inode *inode;
NeilBrown1e594bb2014-12-15 12:57:00 +11007040 struct file *f;
NeilBrown36fa3062005-09-09 16:23:45 -07007041
NeilBrown1e594bb2014-12-15 12:57:00 +11007042 if (mddev->bitmap || mddev->bitmap_info.file)
7043 return -EEXIST; /* cannot add when bitmap is present */
7044 f = fget(fd);
7045
7046 if (f == NULL) {
NeilBrown9d487392016-11-02 14:16:49 +11007047 pr_warn("%s: error: failed to get bitmap file\n",
7048 mdname(mddev));
NeilBrown36fa3062005-09-09 16:23:45 -07007049 return -EBADF;
7050 }
7051
NeilBrown1e594bb2014-12-15 12:57:00 +11007052 inode = f->f_mapping->host;
NeilBrown035328c2014-04-09 12:25:40 +10007053 if (!S_ISREG(inode->i_mode)) {
NeilBrown9d487392016-11-02 14:16:49 +11007054 pr_warn("%s: error: bitmap file must be a regular file\n",
7055 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007056 err = -EBADF;
NeilBrown1e594bb2014-12-15 12:57:00 +11007057 } else if (!(f->f_mode & FMODE_WRITE)) {
NeilBrown9d487392016-11-02 14:16:49 +11007058			pr_warn("%s: error: bitmap file must be opened for write\n",
7059 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007060 err = -EBADF;
7061 } else if (atomic_read(&inode->i_writecount) != 1) {
NeilBrown9d487392016-11-02 14:16:49 +11007062 pr_warn("%s: error: bitmap file is already in use\n",
7063 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007064 err = -EBUSY;
7065 }
7066 if (err) {
NeilBrown1e594bb2014-12-15 12:57:00 +11007067 fput(f);
NeilBrown36fa3062005-09-09 16:23:45 -07007068 return err;
7069 }
NeilBrown1e594bb2014-12-15 12:57:00 +11007070 mddev->bitmap_info.file = f;
NeilBrownc3d97142009-12-14 12:49:52 +11007071 mddev->bitmap_info.offset = 0; /* file overrides offset */
NeilBrown36fa3062005-09-09 16:23:45 -07007072 } else if (mddev->bitmap == NULL)
7073 return -ENOENT; /* cannot remove what isn't there */
7074 err = 0;
7075 if (mddev->pers) {
NeilBrown69e51b42010-06-01 19:37:35 +10007076 if (fd >= 0) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007077 struct bitmap *bitmap;
7078
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007079 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007080 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007081 if (!IS_ERR(bitmap)) {
7082 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007083 err = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007084 } else
7085 err = PTR_ERR(bitmap);
NeilBrown52a0d492017-10-17 13:46:43 +11007086 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007087 md_bitmap_destroy(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007088 fd = -1;
7089 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007090 mddev_resume(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007091 } else if (fd < 0) {
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007092 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007093 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007094 mddev_resume(mddev);
NeilBrownd7375ab2006-06-26 00:27:43 -07007095 }
NeilBrownd7375ab2006-06-26 00:27:43 -07007096 }
7097 if (fd < 0) {
NeilBrown4af1a042014-12-15 12:57:00 +11007098 struct file *f = mddev->bitmap_info.file;
7099 if (f) {
7100 spin_lock(&mddev->lock);
7101 mddev->bitmap_info.file = NULL;
7102 spin_unlock(&mddev->lock);
7103 fput(f);
7104 }
NeilBrown36fa3062005-09-09 16:23:45 -07007105 }
7106
NeilBrown32a76272005-06-21 17:17:14 -07007107 return err;
7108}
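
/*
 * Sketch (the path, and a bitmap file prepared beforehand e.g. by
 * mdadm, are assumptions): the ioctl argument is the open file
 * descriptor itself, and passing -1 detaches the bitmap again.
 *
 *	int attach_bitmap(int md_fd)
 *	{
 *		int bfd = open("/var/lib/md/md0-bitmap", O_RDWR);
 *
 *		if (bfd < 0)
 *			return -1;
 *		return ioctl(md_fd, SET_BITMAP_FILE, bfd);
 *	}
 */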
7109
Linus Torvalds1da177e2005-04-16 15:20:36 -07007110/*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007111 * md_set_array_info is used in two different ways.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007112 * The original usage is when creating a new array.
7113 * In this usage, raid_disks is > 0 and it, together with
7114 * level, size, not_persistent, layout and chunksize, determines the
7115 * shape of the array.
7116 * This will always create an array with a type-0.90.0 superblock.
7117 * The newer usage is when assembling an array.
7118 * In this case raid_disks will be 0, and the major_version field is
7119 * used to determine which style of super-block is to be found on the devices.
7120 * The minor and patch _version numbers are also kept in case the
7121 * super_block handler wishes to interpret them.
7122 */
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007123int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007124{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007125 if (info->raid_disks == 0) {
7126 /* just setting version number for superblock loading */
7127 if (info->major_version < 0 ||
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07007128 info->major_version >= ARRAY_SIZE(super_types) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07007129 super_types[info->major_version].name == NULL) {
7130 /* maybe try to auto-load a module? */
NeilBrown9d487392016-11-02 14:16:49 +11007131 pr_warn("md: superblock version %d not known\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007132 info->major_version);
7133 return -EINVAL;
7134 }
7135 mddev->major_version = info->major_version;
7136 mddev->minor_version = info->minor_version;
7137 mddev->patch_version = info->patch_version;
NeilBrown3f9d7b02006-12-22 01:11:41 -08007138 mddev->persistent = !info->not_persistent;
NeilBrowncbd19982009-12-30 12:08:49 +11007139 /* ensure mddev_put doesn't delete this now that there
7140 * is some minimal configuration.
7141 */
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007142 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007143 return 0;
7144 }
7145 mddev->major_version = MD_MAJOR_VERSION;
7146 mddev->minor_version = MD_MINOR_VERSION;
7147 mddev->patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007148 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149
7150 mddev->level = info->level;
NeilBrown17115e02006-01-16 22:14:57 -08007151 mddev->clevel[0] = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11007152 mddev->dev_sectors = 2 * (sector_t)info->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007153 mddev->raid_disks = info->raid_disks;
7154 /* don't set md_minor, it is determined by which /dev/md* was
7155	 * opened
7156 */
7157 if (info->state & (1<<MD_SB_CLEAN))
7158 mddev->recovery_cp = MaxSector;
7159 else
7160 mddev->recovery_cp = 0;
7161 mddev->persistent = ! info->not_persistent;
NeilBrowne6910632008-02-06 01:39:51 -08007162 mddev->external = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007163
7164 mddev->layout = info->layout;
NeilBrown33f2c352019-09-09 16:52:29 +10007165 if (mddev->level == 0)
7166 /* Cannot trust RAID0 layout info here */
7167 mddev->layout = -1;
Andre Noll9d8f0362009-06-18 08:45:01 +10007168 mddev->chunk_sectors = info->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007169
Shaohua Li29530792016-12-08 15:48:19 -08007170 if (mddev->persistent) {
NeilBrown1b3bae42017-03-01 07:31:28 +11007171 mddev->max_disks = MD_SB_DISKS;
7172 mddev->flags = 0;
7173 mddev->sb_flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08007174 }
7175 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176
NeilBrownc3d97142009-12-14 12:49:52 +11007177 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10007178 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrownc3d97142009-12-14 12:49:52 +11007179 mddev->bitmap_info.offset = 0;
NeilBrownb2a27032005-11-28 13:44:12 -08007180
NeilBrownf6705572006-03-27 01:18:11 -08007181 mddev->reshape_position = MaxSector;
7182
Linus Torvalds1da177e2005-04-16 15:20:36 -07007183 /*
7184 * Generate a 128 bit UUID
7185 */
7186 get_random_bytes(mddev->uuid, 16);
7187
NeilBrownf6705572006-03-27 01:18:11 -08007188 mddev->new_level = mddev->level;
Andre Noll664e7c42009-06-18 08:45:27 +10007189 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08007190 mddev->new_layout = mddev->layout;
7191 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007192 mddev->reshape_backwards = 0;
NeilBrownf6705572006-03-27 01:18:11 -08007193
Linus Torvalds1da177e2005-04-16 15:20:36 -07007194 return 0;
7195}
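
/*
 * Sketch of the second ("assemble") usage described above: only the
 * version fields are filled in, raid_disks stays 0, and the shape
 * fields are ignored.  The chosen version numbers are illustrative.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raid/md_u.h>
 *
 *	int select_metadata(int md_fd)
 *	{
 *		mdu_array_info_t info;
 *
 *		memset(&info, 0, sizeof(info));
 *		info.major_version = 1;		// v1.x superblocks
 *		info.minor_version = 2;		// e.g. metadata 1.2
 *		return ioctl(md_fd, SET_ARRAY_INFO, &info);
 *	}
 */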
7196
NeilBrownfd01b882011-10-11 16:47:53 +11007197void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
Dan Williams1f403622009-03-31 14:59:03 +11007198{
Shaohua Liefa4b772017-10-18 22:08:13 -07007199 lockdep_assert_held(&mddev->reconfig_mutex);
Dan Williamsb522adc2009-03-31 15:00:31 +11007200
7201 if (mddev->external_size)
7202 return;
7203
Dan Williams1f403622009-03-31 14:59:03 +11007204 mddev->array_sectors = array_sectors;
7205}
7206EXPORT_SYMBOL(md_set_array_sectors);
7207
NeilBrownfd01b882011-10-11 16:47:53 +11007208static int update_size(struct mddev *mddev, sector_t num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007209{
NeilBrown3cb03002011-10-11 16:45:26 +11007210 struct md_rdev *rdev;
NeilBrowna35b0d62006-01-06 00:20:49 -08007211 int rv;
Andre Nolld71f9f82008-07-11 22:02:22 +10007212 int fit = (num_sectors == 0);
Guoqing Jiang818da592017-03-01 16:42:40 +08007213 sector_t old_dev_sectors = mddev->dev_sectors;
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04007214
NeilBrowna35b0d62006-01-06 00:20:49 -08007215 if (mddev->pers->resize == NULL)
7216 return -EINVAL;
Andre Nolld71f9f82008-07-11 22:02:22 +10007217 /* The "num_sectors" is the number of sectors of each device that
7218 * is used. This can only make sense for arrays with redundancy.
7219 * linear and raid0 always use whatever space is available. We can only
7220 * consider changing this number if no resync or reconstruction is
7221 * happening, and if the new size is acceptable. It must fit before the
Andre Noll0f420352008-07-11 22:02:23 +10007222 * sb_start or, if that is <data_offset, it must fit before the size
Andre Nolld71f9f82008-07-11 22:02:22 +10007223 * of each device. If num_sectors is zero, we find the largest size
7224 * that fits.
NeilBrowna35b0d62006-01-06 00:20:49 -08007225 */
NeilBrownf851b602014-12-11 10:02:10 +11007226 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7227 mddev->sync_thread)
NeilBrowna35b0d62006-01-06 00:20:49 -08007228 return -EBUSY;
NeilBrownbd8839e2014-05-28 13:39:21 +10007229 if (mddev->ro)
7230 return -EROFS;
NeilBrowna4a61252012-05-22 13:55:27 +10007231
NeilBrowndafb20f2012-03-19 12:46:39 +11007232 rdev_for_each(rdev, mddev) {
Andre Nolldd8ac332009-03-31 14:33:13 +11007233 sector_t avail = rdev->sectors;
NeilBrown01ab5662006-10-28 10:38:30 -07007234
Andre Nolld71f9f82008-07-11 22:02:22 +10007235 if (fit && (num_sectors == 0 || num_sectors > avail))
7236 num_sectors = avail;
7237 if (avail < num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007238 return -ENOSPC;
7239 }
Andre Nolld71f9f82008-07-11 22:02:22 +10007240 rv = mddev->pers->resize(mddev, num_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007241 if (!rv) {
Guoqing Jiang818da592017-03-01 16:42:40 +08007242 if (mddev_is_clustered(mddev))
7243 md_cluster_ops->update_size(mddev, old_dev_sectors);
7244 else if (mddev->queue) {
Christoph Hellwig2c247c52020-11-16 15:57:11 +01007245 set_capacity_and_notify(mddev->gendisk,
7246 mddev->array_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007247 }
7248 }
NeilBrowna35b0d62006-01-06 00:20:49 -08007249 return rv;
7250}
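
/*
 * Worked example: update_array_info() below passes info->size * 2
 * because info->size is in KiB while update_size() takes 512-byte
 * sectors, so a requested per-device size of 1048576 KiB (1 GiB)
 * arrives here as num_sectors = 2097152; num_sectors == 0 asks for
 * the largest size that fits on every member.
 */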
7251
NeilBrownfd01b882011-10-11 16:47:53 +11007252static int update_raid_disks(struct mddev *mddev, int raid_disks)
NeilBrownda943b992006-01-06 00:20:54 -08007253{
7254 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10007255 struct md_rdev *rdev;
NeilBrownda943b992006-01-06 00:20:54 -08007256 /* change the number of raid disks */
NeilBrown63c70c42006-03-27 01:18:13 -08007257 if (mddev->pers->check_reshape == NULL)
NeilBrownda943b992006-01-06 00:20:54 -08007258 return -EINVAL;
NeilBrownbd8839e2014-05-28 13:39:21 +10007259 if (mddev->ro)
7260 return -EROFS;
NeilBrownda943b992006-01-06 00:20:54 -08007261 if (raid_disks <= 0 ||
NeilBrown233fca32010-04-14 17:02:09 +10007262 (mddev->max_disks && raid_disks >= mddev->max_disks))
NeilBrownda943b992006-01-06 00:20:54 -08007263 return -EINVAL;
NeilBrownf851b602014-12-11 10:02:10 +11007264 if (mddev->sync_thread ||
7265 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
Zhao Heminga8da01f2020-11-19 19:41:33 +08007266 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
NeilBrownf851b602014-12-11 10:02:10 +11007267 mddev->reshape_position != MaxSector)
NeilBrownda943b992006-01-06 00:20:54 -08007268 return -EBUSY;
NeilBrownc6563a82012-05-21 09:27:00 +10007269
7270 rdev_for_each(rdev, mddev) {
7271 if (mddev->raid_disks < raid_disks &&
7272 rdev->data_offset < rdev->new_data_offset)
7273 return -EINVAL;
7274 if (mddev->raid_disks > raid_disks &&
7275 rdev->data_offset > rdev->new_data_offset)
7276 return -EINVAL;
7277 }
7278
NeilBrown63c70c42006-03-27 01:18:13 -08007279 mddev->delta_disks = raid_disks - mddev->raid_disks;
NeilBrown2c810cd2012-05-21 09:27:00 +10007280 if (mddev->delta_disks < 0)
7281 mddev->reshape_backwards = 1;
7282 else if (mddev->delta_disks > 0)
7283 mddev->reshape_backwards = 0;
NeilBrown63c70c42006-03-27 01:18:13 -08007284
7285 rv = mddev->pers->check_reshape(mddev);
NeilBrown2c810cd2012-05-21 09:27:00 +10007286 if (rv < 0) {
NeilBrownde171cb2011-01-31 11:57:42 +11007287 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007288 mddev->reshape_backwards = 0;
7289 }
NeilBrownda943b992006-01-06 00:20:54 -08007290 return rv;
7291}
7292
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293/*
7294 * update_array_info is used to change the configuration of an
7295 * on-line array.
7296 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
7297 * fields in the info are checked against the array.
7298 * Any differences that cannot be handled will cause an error.
7299 * Normally, only one change can be managed at a time.
7300 */
NeilBrownfd01b882011-10-11 16:47:53 +11007301static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007302{
7303 int rv = 0;
7304 int cnt = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007305 int state = 0;
7306
7307	/* calculate expected state, ignoring low bits */
NeilBrownc3d97142009-12-14 12:49:52 +11007308 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown36fa3062005-09-09 16:23:45 -07007309 state |= (1 << MD_SB_BITMAP_PRESENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007310
7311 if (mddev->major_version != info->major_version ||
7312 mddev->minor_version != info->minor_version ||
7313/* mddev->patch_version != info->patch_version || */
7314 mddev->ctime != info->ctime ||
7315 mddev->level != info->level ||
7316/* mddev->layout != info->layout || */
Firo Yang4e023612015-06-11 09:41:10 +08007317 mddev->persistent != !info->not_persistent ||
Andre Noll9d8f0362009-06-18 08:45:01 +10007318 mddev->chunk_sectors != info->chunk_size >> 9 ||
NeilBrown36fa3062005-09-09 16:23:45 -07007319 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7320 ((state^info->state) & 0xfffffe00)
7321 )
Linus Torvalds1da177e2005-04-16 15:20:36 -07007322 return -EINVAL;
7323 /* Check there is only one change */
Andre Noll58c0fed2009-03-31 14:33:13 +11007324 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7325 cnt++;
7326 if (mddev->raid_disks != info->raid_disks)
7327 cnt++;
7328 if (mddev->layout != info->layout)
7329 cnt++;
7330 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7331 cnt++;
7332 if (cnt == 0)
7333 return 0;
7334 if (cnt > 1)
7335 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007336
7337 if (mddev->layout != info->layout) {
7338 /* Change layout
7339 * we don't need to do anything at the md level, the
7340 * personality will take care of it all.
7341 */
NeilBrown50ac1682009-06-18 08:47:55 +10007342 if (mddev->pers->check_reshape == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007343 return -EINVAL;
NeilBrown597a7112009-06-18 08:47:42 +10007344 else {
7345 mddev->new_layout = info->layout;
NeilBrown50ac1682009-06-18 08:47:55 +10007346 rv = mddev->pers->check_reshape(mddev);
NeilBrown597a7112009-06-18 08:47:42 +10007347 if (rv)
7348 mddev->new_layout = mddev->layout;
7349 return rv;
7350 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007351 }
Andre Noll58c0fed2009-03-31 14:33:13 +11007352 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
Andre Nolld71f9f82008-07-11 22:02:22 +10007353 rv = update_size(mddev, (sector_t)info->size * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007354
NeilBrownda943b992006-01-06 00:20:54 -08007355 if (mddev->raid_disks != info->raid_disks)
7356 rv = update_raid_disks(mddev, info->raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007357
NeilBrown36fa3062005-09-09 16:23:45 -07007358 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007359 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7360 rv = -EINVAL;
7361 goto err;
7362 }
7363 if (mddev->recovery || mddev->sync_thread) {
7364 rv = -EBUSY;
7365 goto err;
7366 }
NeilBrown36fa3062005-09-09 16:23:45 -07007367 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007368 struct bitmap *bitmap;
NeilBrown36fa3062005-09-09 16:23:45 -07007369 /* add the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007370 if (mddev->bitmap) {
7371 rv = -EEXIST;
7372 goto err;
7373 }
7374 if (mddev->bitmap_info.default_offset == 0) {
7375 rv = -EINVAL;
7376 goto err;
7377 }
NeilBrownc3d97142009-12-14 12:49:52 +11007378 mddev->bitmap_info.offset =
7379 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10007380 mddev->bitmap_info.space =
7381 mddev->bitmap_info.default_space;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007382 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007383 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007384 if (!IS_ERR(bitmap)) {
7385 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007386 rv = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007387 } else
7388 rv = PTR_ERR(bitmap);
NeilBrown36fa3062005-09-09 16:23:45 -07007389 if (rv)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007390 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007391 mddev_resume(mddev);
NeilBrown36fa3062005-09-09 16:23:45 -07007392 } else {
7393 /* remove the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007394 if (!mddev->bitmap) {
7395 rv = -ENOENT;
7396 goto err;
7397 }
7398 if (mddev->bitmap->storage.file) {
7399 rv = -EINVAL;
7400 goto err;
7401 }
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007402 if (mddev->bitmap_info.nodes) {
7403 /* hold PW on all the bitmap lock */
7404 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11007405 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007406 rv = -EPERM;
7407 md_cluster_ops->unlock_all_bitmaps(mddev);
7408 goto err;
7409 }
7410
7411 mddev->bitmap_info.nodes = 0;
7412 md_cluster_ops->leave(mddev);
Zhao Hemingedee9df2020-07-21 02:08:53 +08007413 module_put(md_cluster_mod);
Zhao Heming7c9d5c52020-07-21 02:08:52 +08007414 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007415 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007416 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007417 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007418 mddev_resume(mddev);
NeilBrownc3d97142009-12-14 12:49:52 +11007419 mddev->bitmap_info.offset = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007420 }
7421 }
NeilBrown850b2b422006-10-03 01:15:46 -07007422 md_update_sb(mddev, 1);
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007423 return rv;
7424err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007425 return rv;
7426}
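
/*
 * Sketch: since only one change is accepted per call, a userspace
 * resize reads the current description and alters just the size
 * field (in KiB; mdadm performs more validation than shown here):
 *
 *	mdu_array_info_t info;
 *
 *	if (ioctl(md_fd, GET_ARRAY_INFO, &info) != 0)
 *		return -1;
 *	info.size = 4194304;	// grow each member to 4 GiB
 *	return ioctl(md_fd, SET_ARRAY_INFO, &info);
 */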
7427
NeilBrownfd01b882011-10-11 16:47:53 +11007428static int set_disk_faulty(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007429{
NeilBrown3cb03002011-10-11 16:45:26 +11007430 struct md_rdev *rdev;
NeilBrown1ca69c42012-10-11 13:37:33 +11007431 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007432
7433 if (mddev->pers == NULL)
7434 return -ENODEV;
7435
NeilBrown1ca69c42012-10-11 13:37:33 +11007436 rcu_read_lock();
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +01007437 rdev = md_find_rdev_rcu(mddev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007438 if (!rdev)
NeilBrown1ca69c42012-10-11 13:37:33 +11007439 err = -ENODEV;
7440 else {
7441 md_error(mddev, rdev);
7442 if (!test_bit(Faulty, &rdev->flags))
7443 err = -EBUSY;
7444 }
7445 rcu_read_unlock();
7446 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007447}
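
/*
 * Sketch: failing a member from userspace (what "mdadm --fail"
 * issues); as elsewhere, the argument is the member's dev_t.
 *
 *	int fail_member(int md_fd, dev_t part)
 *	{
 *		// -EBUSY means md_error() could not fail the disk
 *		return ioctl(md_fd, SET_DISK_FAULTY, (unsigned long)part);
 *	}
 */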
7448
Andre Noll2f9618c2008-04-25 18:57:58 +02007449/*
7450 * We have a problem here: there is no easy way to give a CHS
7451 * virtual geometry. We currently pretend that we have a 2 heads
7452 * 4 sectors (with a BIG number of cylinders...). This drives
7453 * dosfs just mad... ;-)
7454 */
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007455static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7456{
NeilBrownfd01b882011-10-11 16:47:53 +11007457 struct mddev *mddev = bdev->bd_disk->private_data;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007458
7459 geo->heads = 2;
7460 geo->sectors = 4;
NeilBrown49ce6ce2010-03-29 10:51:42 +11007461 geo->cylinders = mddev->array_sectors / 8;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007462 return 0;
7463}
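
/*
 * Worked example: with 2 heads and 4 sectors per track a "cylinder"
 * holds 2 * 4 = 8 sectors, so a 1 GiB array (2097152 512-byte
 * sectors) reports 2097152 / 8 = 262144 cylinders.
 */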
7464
Nicolas Schichancb335f82014-01-15 16:58:52 +01007465static inline bool md_ioctl_valid(unsigned int cmd)
7466{
7467 switch (cmd) {
7468 case ADD_NEW_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007469 case GET_ARRAY_INFO:
7470 case GET_BITMAP_FILE:
7471 case GET_DISK_INFO:
7472 case HOT_ADD_DISK:
7473 case HOT_REMOVE_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007474 case RAID_VERSION:
7475 case RESTART_ARRAY_RW:
7476 case RUN_ARRAY:
7477 case SET_ARRAY_INFO:
7478 case SET_BITMAP_FILE:
7479 case SET_DISK_FAULTY:
7480 case STOP_ARRAY:
7481 case STOP_ARRAY_RO:
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007482 case CLUSTERED_DISK_NACK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007483 return true;
7484 default:
7485 return false;
7486 }
7487}
7488
Al Viroa39907f2008-03-02 10:31:15 -05007489static int md_ioctl(struct block_device *bdev, fmode_t mode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007490 unsigned int cmd, unsigned long arg)
7491{
7492 int err = 0;
7493 void __user *argp = (void __user *)arg;
NeilBrownfd01b882011-10-11 16:47:53 +11007494 struct mddev *mddev = NULL;
NeilBrown065e5192017-04-06 11:16:33 +08007495 bool did_set_md_closing = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007496
Nicolas Schichancb335f82014-01-15 16:58:52 +01007497 if (!md_ioctl_valid(cmd))
7498 return -ENOTTY;
7499
NeilBrown506c9e42011-12-23 10:17:26 +11007500 switch (cmd) {
7501 case RAID_VERSION:
7502 case GET_ARRAY_INFO:
7503 case GET_DISK_INFO:
7504 break;
7505 default:
7506 if (!capable(CAP_SYS_ADMIN))
7507 return -EACCES;
7508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007509
7510 /*
7511 * Commands dealing with the RAID driver but not any
7512 * particular array:
7513 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007514 switch (cmd) {
7515 case RAID_VERSION:
7516 err = get_version(argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007517 goto out;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007518 default:;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519 }
7520
7521 /*
7522 * Commands creating/starting a new array:
7523 */
7524
Al Viroa39907f2008-03-02 10:31:15 -05007525 mddev = bdev->bd_disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007526
7527 if (!mddev) {
7528 BUG();
NeilBrown3adc28d2014-09-30 15:46:41 +10007529 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007530 }
7531
NeilBrown1ca69c42012-10-11 13:37:33 +11007532	/* Some actions do not require the mutex */
7533 switch (cmd) {
7534 case GET_ARRAY_INFO:
7535 if (!mddev->raid_disks && !mddev->external)
7536 err = -ENODEV;
7537 else
7538 err = get_array_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007539 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007540
7541 case GET_DISK_INFO:
7542 if (!mddev->raid_disks && !mddev->external)
7543 err = -ENODEV;
7544 else
7545 err = get_disk_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007546 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007547
7548 case SET_DISK_FAULTY:
7549 err = set_disk_faulty(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007550 goto out;
NeilBrown4af1a042014-12-15 12:57:00 +11007551
7552 case GET_BITMAP_FILE:
7553 err = get_bitmap_file(mddev, argp);
7554 goto out;
7555
NeilBrown1ca69c42012-10-11 13:37:33 +11007556 }
7557
Guoqing Jiang78b990c2020-04-04 23:57:10 +02007558 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02007559 flush_rdev_wq(mddev);
NeilBrowna7a3f082012-12-11 13:35:54 +11007560
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007561 if (cmd == HOT_REMOVE_DISK)
7562 /* need to ensure recovery thread has run */
7563 wait_event_interruptible_timeout(mddev->sb_wait,
7564 !test_bit(MD_RECOVERY_NEEDED,
Shaohua Li82a301c2016-12-08 15:48:18 -08007565 &mddev->recovery),
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007566 msecs_to_jiffies(5000));
NeilBrown260fa032013-08-27 16:44:13 +10007567 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7568 /* Need to flush page cache, and ensure no-one else opens
7569 * and writes
7570 */
7571 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10007572 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
NeilBrown260fa032013-08-27 16:44:13 +10007573 mutex_unlock(&mddev->open_mutex);
7574 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007575 goto out;
NeilBrown260fa032013-08-27 16:44:13 +10007576 }
Dae R. Jeongc731b842020-10-22 10:21:28 +09007577 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7578 mutex_unlock(&mddev->open_mutex);
7579 err = -EBUSY;
7580 goto out;
7581 }
NeilBrown065e5192017-04-06 11:16:33 +08007582 did_set_md_closing = true;
NeilBrown260fa032013-08-27 16:44:13 +10007583 mutex_unlock(&mddev->open_mutex);
7584 sync_blockdev(bdev);
7585 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007586 err = mddev_lock(mddev);
7587 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007588 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7589 err, cmd);
NeilBrown3adc28d2014-09-30 15:46:41 +10007590 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007591 }
7592
NeilBrownc02c0ae2012-12-11 13:39:21 +11007593 if (cmd == SET_ARRAY_INFO) {
7594 mdu_array_info_t info;
7595 if (!arg)
7596 memset(&info, 0, sizeof(info));
7597 else if (copy_from_user(&info, argp, sizeof(info))) {
7598 err = -EFAULT;
NeilBrown3adc28d2014-09-30 15:46:41 +10007599 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007600 }
7601 if (mddev->pers) {
7602 err = update_array_info(mddev, &info);
7603 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007604 pr_warn("md: couldn't update array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007605 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007606 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007607 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007608 }
7609 if (!list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11007610 pr_warn("md: array %s already has disks!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007611 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007612 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007613 }
7614 if (mddev->raid_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11007615 pr_warn("md: array %s already initialised!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007616 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007617 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007618 }
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007619 err = md_set_array_info(mddev, &info);
NeilBrownc02c0ae2012-12-11 13:39:21 +11007620 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007621 pr_warn("md: couldn't set array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007622 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007623 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007624 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007625 }
7626
7627 /*
7628 * Commands querying/configuring an existing array:
7629 */
NeilBrown32a76272005-06-21 17:17:14 -07007630 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
NeilBrown3f9d7b02006-12-22 01:11:41 -08007631 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
NeilBrowna17184a2008-02-06 01:39:55 -08007632 if ((!mddev->raid_disks && !mddev->external)
7633 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7634 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7635 && cmd != GET_BITMAP_FILE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007636 err = -ENODEV;
NeilBrown3adc28d2014-09-30 15:46:41 +10007637 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007638 }
7639
7640 /*
7641 * Commands even a read-only array can execute:
7642 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007643 switch (cmd) {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007644 case RESTART_ARRAY_RW:
7645 err = restart_array(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007646 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007647
7648 case STOP_ARRAY:
7649 err = do_md_stop(mddev, 0, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007650 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007651
7652 case STOP_ARRAY_RO:
7653 err = md_set_readonly(mddev, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007654 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007655
NeilBrown3ea8929d2013-04-24 11:42:41 +10007656 case HOT_REMOVE_DISK:
7657 err = hot_remove_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007658 goto unlock;
NeilBrown3ea8929d2013-04-24 11:42:41 +10007659
NeilBrown7ceb17e2013-04-24 11:42:42 +10007660 case ADD_NEW_DISK:
7661 /* We can support ADD_NEW_DISK on read-only arrays
Wei Fang466ad292016-03-21 19:19:30 +08007662 * only if we are re-adding a preexisting device.
NeilBrown7ceb17e2013-04-24 11:42:42 +10007663 * So require mddev->pers and MD_DISK_SYNC.
7664 */
7665 if (mddev->pers) {
7666 mdu_disk_info_t info;
7667 if (copy_from_user(&info, argp, sizeof(info)))
7668 err = -EFAULT;
7669 else if (!(info.state & (1<<MD_DISK_SYNC)))
7670 /* Need to clear read-only for this */
7671 break;
7672 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007673 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007674 goto unlock;
NeilBrown7ceb17e2013-04-24 11:42:42 +10007675 }
7676 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007677 }
7678
7679 /*
7680 * The remaining ioctls are changing the state of the
NeilBrownf91de922005-11-08 21:39:36 -08007681 * superblock, so we do not allow them on read-only arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007682 */
NeilBrown326eb172014-09-30 15:36:28 +10007683 if (mddev->ro && mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08007684 if (mddev->ro == 2) {
7685 mddev->ro = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10007686 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10007687 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf3378b42013-02-28 11:59:03 +11007688 /* mddev_unlock will wake thread */
7689 /* If a device failed while we were read-only, we
7690 * need to make sure the metadata is updated now.
7691 */
Shaohua Li29530792016-12-08 15:48:19 -08007692 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
NeilBrownf3378b42013-02-28 11:59:03 +11007693 mddev_unlock(mddev);
7694 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08007695 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7696 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown29f097c2013-11-14 17:54:51 +11007697 mddev_lock_nointr(mddev);
NeilBrownf3378b42013-02-28 11:59:03 +11007698 }
NeilBrownf91de922005-11-08 21:39:36 -08007699 } else {
7700 err = -EROFS;
NeilBrown3adc28d2014-09-30 15:46:41 +10007701 goto unlock;
NeilBrownf91de922005-11-08 21:39:36 -08007702 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007703 }
7704
NeilBrownc02c0ae2012-12-11 13:39:21 +11007705 switch (cmd) {
7706 case ADD_NEW_DISK:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007707 {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007708 mdu_disk_info_t info;
7709 if (copy_from_user(&info, argp, sizeof(info)))
7710 err = -EFAULT;
7711 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007712 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007713 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007714 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007715
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007716 case CLUSTERED_DISK_NACK:
7717 if (mddev_is_clustered(mddev))
7718 md_cluster_ops->new_disk_ack(mddev, false);
7719 else
7720 err = -EINVAL;
7721 goto unlock;
7722
NeilBrownc02c0ae2012-12-11 13:39:21 +11007723 case HOT_ADD_DISK:
7724 err = hot_add_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007725 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007726
NeilBrownc02c0ae2012-12-11 13:39:21 +11007727 case RUN_ARRAY:
7728 err = do_md_run(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007729 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007730
NeilBrownc02c0ae2012-12-11 13:39:21 +11007731 case SET_BITMAP_FILE:
7732 err = set_bitmap_file(mddev, (int)arg);
NeilBrown3adc28d2014-09-30 15:46:41 +10007733 goto unlock;
NeilBrown32a76272005-06-21 17:17:14 -07007734
NeilBrownc02c0ae2012-12-11 13:39:21 +11007735 default:
7736 err = -EINVAL;
NeilBrown3adc28d2014-09-30 15:46:41 +10007737 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007738 }
7739
NeilBrown3adc28d2014-09-30 15:46:41 +10007740unlock:
NeilBrownd3374822009-01-09 08:31:10 +11007741 if (mddev->hold_active == UNTIL_IOCTL &&
7742 err != -EINVAL)
7743 mddev->hold_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007744 mddev_unlock(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007745out:
NeilBrown065e5192017-04-06 11:16:33 +08007746	if (did_set_md_closing)
7747 clear_bit(MD_CLOSING, &mddev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007748 return err;
7749}
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007750#ifdef CONFIG_COMPAT
7751static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7752 unsigned int cmd, unsigned long arg)
7753{
7754 switch (cmd) {
7755 case HOT_REMOVE_DISK:
7756 case HOT_ADD_DISK:
7757 case SET_DISK_FAULTY:
7758 case SET_BITMAP_FILE:
7759 /* These take in integer arg, do not convert */
7760 break;
7761 default:
7762 arg = (unsigned long)compat_ptr(arg);
7763 break;
7764 }
7765
7766 return md_ioctl(bdev, mode, cmd, arg);
7767}
7768#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007769
Christoph Hellwig118cf082020-11-03 11:00:13 +01007770static int md_set_read_only(struct block_device *bdev, bool ro)
7771{
7772 struct mddev *mddev = bdev->bd_disk->private_data;
7773 int err;
7774
7775 err = mddev_lock(mddev);
7776 if (err)
7777 return err;
7778
7779 if (!mddev->raid_disks && !mddev->external) {
7780 err = -ENODEV;
7781 goto out_unlock;
7782 }
7783
7784 /*
7785 * Transitioning to read-auto need only happen for arrays that call
7786 * md_write_start and which are not ready for writes yet.
7787 */
7788 if (!ro && mddev->ro == 1 && mddev->pers) {
7789 err = restart_array(mddev);
7790 if (err)
7791 goto out_unlock;
7792 mddev->ro = 2;
7793 }
7794
7795out_unlock:
7796 mddev_unlock(mddev);
7797 return err;
7798}
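
/*
 * Sketch: this op backs the generic BLKROSET ioctl, so something
 * like "blockdev --setro /dev/md0" ends up here.  A minimal C
 * equivalent:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int set_ro(int md_fd, int ro)	// 1 = read-only, 0 = read-write
 *	{
 *		return ioctl(md_fd, BLKROSET, &ro);
 *	}
 */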
7799
Al Viroa39907f2008-03-02 10:31:15 -05007800static int md_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007801{
7802 /*
7803 * Succeed if we can lock the mddev, which confirms that
7804 * it isn't being stopped right now.
7805 */
NeilBrownfd01b882011-10-11 16:47:53 +11007806 struct mddev *mddev = mddev_find(bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007807 int err;
7808
Yuanhan Liu0c098222012-05-22 13:55:32 +10007809 if (!mddev)
7810 return -ENODEV;
7811
NeilBrownd3374822009-01-09 08:31:10 +11007812 if (mddev->gendisk != bdev->bd_disk) {
7813 /* we are racing with mddev_put which is discarding this
7814 * bd_disk.
7815 */
7816 mddev_put(mddev);
7817 /* Wait until bdev->bd_disk is definitely gone */
Guoqing Jiangf6766ff2020-04-04 23:57:09 +02007818 if (work_pending(&mddev->del_work))
7819 flush_workqueue(md_misc_wq);
Zhao Heming6a4db2a2021-04-03 11:01:25 +08007820 return -EBUSY;
NeilBrownd3374822009-01-09 08:31:10 +11007821 }
7822 BUG_ON(mddev != bdev->bd_disk->private_data);
7823
NeilBrownc8c00a62009-08-10 12:50:52 +10007824 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007825 goto out;
7826
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007827 if (test_bit(MD_CLOSING, &mddev->flags)) {
7828 mutex_unlock(&mddev->open_mutex);
NeilBrowne2342ca2016-12-05 16:40:50 +11007829 err = -ENODEV;
7830 goto out;
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007831 }
7832
Linus Torvalds1da177e2005-04-16 15:20:36 -07007833 err = 0;
NeilBrownf2ea68c2008-07-21 17:05:25 +10007834 atomic_inc(&mddev->openers);
NeilBrownc8c00a62009-08-10 12:50:52 +10007835 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007836
Christoph Hellwig818077d2020-09-08 16:53:43 +02007837 bdev_check_media_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007838 out:
NeilBrowne2342ca2016-12-05 16:40:50 +11007839 if (err)
7840 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007841 return err;
7842}
7843
Al Virodb2a1442013-05-05 21:52:57 -04007844static void md_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007845{
NeilBrownf72ffdd2014-09-30 14:23:59 +10007846 struct mddev *mddev = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007847
Eric Sesterhenn52e5f9d2006-10-03 23:33:23 +02007848 BUG_ON(!mddev);
NeilBrownf2ea68c2008-07-21 17:05:25 +10007849 atomic_dec(&mddev->openers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007850 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007851}
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007852
Christoph Hellwiga564e232020-07-08 14:25:41 +02007853static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007854{
NeilBrownfd01b882011-10-11 16:47:53 +11007855 struct mddev *mddev = disk->private_data;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007856 unsigned int ret = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007857
Christoph Hellwiga564e232020-07-08 14:25:41 +02007858 if (mddev->changed)
7859 ret = DISK_EVENT_MEDIA_CHANGE;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007860 mddev->changed = 0;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007861 return ret;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007862}
Christoph Hellwiga564e232020-07-08 14:25:41 +02007863
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007864const struct block_device_operations md_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07007865{
7866 .owner = THIS_MODULE,
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02007867 .submit_bio = md_submit_bio,
Al Viroa39907f2008-03-02 10:31:15 -05007868 .open = md_open,
7869 .release = md_release,
NeilBrownb492b852009-05-26 12:57:36 +10007870 .ioctl = md_ioctl,
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007871#ifdef CONFIG_COMPAT
7872 .compat_ioctl = md_compat_ioctl,
7873#endif
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007874 .getgeo = md_getgeo,
Christoph Hellwiga564e232020-07-08 14:25:41 +02007875 .check_events = md_check_events,
Christoph Hellwig118cf082020-11-03 11:00:13 +01007876 .set_read_only = md_set_read_only,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007877};
7878
NeilBrownf72ffdd2014-09-30 14:23:59 +10007879static int md_thread(void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007880{
NeilBrown2b8bf342011-10-11 16:48:23 +11007881 struct md_thread *thread = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007882
Linus Torvalds1da177e2005-04-16 15:20:36 -07007883 /*
7884	 * md_thread is a 'system-thread'; its priority should be very
7885 * high. We avoid resource deadlocks individually in each
7886 * raid personality. (RAID5 does preallocation) We also use RR and
7887 * the very same RT priority as kswapd, thus we will never get
7888 * into a priority inversion deadlock.
7889 *
7890 * we definitely have to have equal or higher priority than
7891 * bdflush, otherwise bdflush will deadlock if there are too
7892 * many dirty RAID5 blocks.
7893 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007894
NeilBrown6985c432005-10-19 21:23:47 -07007895 allow_signal(SIGKILL);
NeilBrowna6fb0932005-09-09 16:23:56 -07007896 while (!kthread_should_stop()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007897
NeilBrown93588e22005-11-15 00:09:12 -08007898 /* We need to wait INTERRUPTIBLE so that
7899 * we don't add to the load-average.
7900 * That means we need to be sure no signals are
7901 * pending
7902 */
7903 if (signal_pending(current))
7904 flush_signals(current);
7905
7906 wait_event_interruptible_timeout
7907 (thread->wqueue,
7908 test_bit(THREAD_WAKEUP, &thread->flags)
Shaohua Lice1ccd02016-11-21 10:29:18 -08007909 || kthread_should_stop() || kthread_should_park(),
NeilBrown93588e22005-11-15 00:09:12 -08007910 thread->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007911
NeilBrown6c987912011-01-14 09:13:53 +11007912 clear_bit(THREAD_WAKEUP, &thread->flags);
Shaohua Lice1ccd02016-11-21 10:29:18 -08007913 if (kthread_should_park())
7914 kthread_parkme();
NeilBrown6c987912011-01-14 09:13:53 +11007915 if (!kthread_should_stop())
Shaohua Li4ed87312012-10-11 13:34:00 +11007916 thread->run(thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007917 }
NeilBrowna6fb0932005-09-09 16:23:56 -07007918
Linus Torvalds1da177e2005-04-16 15:20:36 -07007919 return 0;
7920}
7921
NeilBrown2b8bf342011-10-11 16:48:23 +11007922void md_wakeup_thread(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007923{
7924 if (thread) {
NeilBrown36a4e1f2011-10-07 14:23:17 +11007925 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
Guoqing Jiangd1d90142017-10-09 10:32:48 +08007926 set_bit(THREAD_WAKEUP, &thread->flags);
7927 wake_up(&thread->wqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007928 }
7929}
NeilBrown6c144d32014-09-30 16:15:38 +10007930EXPORT_SYMBOL(md_wakeup_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007931
Shaohua Li4ed87312012-10-11 13:34:00 +11007932struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7933 struct mddev *mddev, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007934{
NeilBrown2b8bf342011-10-11 16:48:23 +11007935 struct md_thread *thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007936
NeilBrown2b8bf342011-10-11 16:48:23 +11007937 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938 if (!thread)
7939 return NULL;
7940
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941 init_waitqueue_head(&thread->wqueue);
7942
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943 thread->run = run;
7944 thread->mddev = mddev;
NeilBrown32a76272005-06-21 17:17:14 -07007945 thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown0da3c612009-09-23 18:09:45 +10007946 thread->tsk = kthread_run(md_thread, thread,
7947 "%s_%s",
7948 mdname(thread->mddev),
NeilBrown02326052012-07-03 15:56:52 +10007949 name);
NeilBrowna6fb0932005-09-09 16:23:56 -07007950 if (IS_ERR(thread->tsk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007951 kfree(thread);
7952 return NULL;
7953 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007954 return thread;
7955}
NeilBrown6c144d32014-09-30 16:15:38 +10007956EXPORT_SYMBOL(md_register_thread);
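
/*
 * Sketch of the usual calling pattern in a personality (modelled on
 * the existing RAID drivers; the names are illustrative):
 *
 *	static void myraid_daemon(struct md_thread *thread)
 *	{
 *		struct mddev *mddev = thread->mddev;
 *
 *		// drain queued work, then return; md_thread() sleeps
 *		// until the next md_wakeup_thread()
 *	}
 *
 *	mddev->thread = md_register_thread(myraid_daemon, mddev, "myraid");
 *	if (!mddev->thread)
 *		return -ENOMEM;
 */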
Linus Torvalds1da177e2005-04-16 15:20:36 -07007957
NeilBrown2b8bf342011-10-11 16:48:23 +11007958void md_unregister_thread(struct md_thread **threadp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007959{
NeilBrown2b8bf342011-10-11 16:48:23 +11007960 struct md_thread *thread = *threadp;
NeilBrowne0cf8f02009-03-31 14:39:39 +11007961 if (!thread)
7962 return;
NeilBrown36a4e1f2011-10-07 14:23:17 +11007963 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
NeilBrown01f96c02011-09-21 15:30:20 +10007964 /* Locking ensures that mddev_unlock does not wake_up a
7965 * non-existent thread
7966 */
7967 spin_lock(&pers_lock);
7968 *threadp = NULL;
7969 spin_unlock(&pers_lock);
NeilBrowna6fb0932005-09-09 16:23:56 -07007970
7971 kthread_stop(thread->tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007972 kfree(thread);
7973}
NeilBrown6c144d32014-09-30 16:15:38 +10007974EXPORT_SYMBOL(md_unregister_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007975
NeilBrownfd01b882011-10-11 16:47:53 +11007976void md_error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007977{
NeilBrownb2d444d2005-11-08 21:39:31 -08007978 if (!rdev || test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979 return;
Dan Williams6bfe0b42008-04-30 00:52:32 -07007980
NeilBrownde393cd2011-07-28 11:31:48 +10007981 if (!mddev->pers || !mddev->pers->error_handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007982 return;
7983 mddev->pers->error_handler(mddev,rdev);
Neil Brown72a23c22008-06-28 08:31:41 +10007984 if (mddev->degraded)
7985 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown00bcb4a2010-06-01 19:37:23 +10007986 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007987 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7988 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7989 md_wakeup_thread(mddev->thread);
NeilBrown768a4182010-07-26 11:49:55 +10007990 if (mddev->event_work.func)
Tejun Heoe804ac72010-10-15 15:36:08 +02007991 queue_work(md_misc_wq, &mddev->event_work);
Guoqing Jiang54679482021-10-04 23:34:53 +08007992 md_new_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007993}
NeilBrown6c144d32014-09-30 16:15:38 +10007994EXPORT_SYMBOL(md_error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007995
7996/* seq_file implementation /proc/mdstat */
7997
7998static void status_unused(struct seq_file *seq)
7999{
8000 int i = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008001 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008002
8003 seq_printf(seq, "unused devices: ");
8004
Cheng Renquan159ec1f2009-01-09 08:31:08 +11008005 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008006 char b[BDEVNAME_SIZE];
8007 i++;
8008 seq_printf(seq, "%s ",
8009 bdevname(rdev->bdev,b));
8010 }
8011 if (!i)
8012 seq_printf(seq, "<none>");
8013
8014 seq_printf(seq, "\n");
8015}
8016
NeilBrownf7851be2015-07-02 17:12:58 +10008017static int status_resync(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008018{
NeilBrowndd71cf62009-05-07 12:49:35 +10008019 sector_t max_sectors, resync, res;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008020 unsigned long dt, db = 0;
8021 sector_t rt, curr_mark_cnt, resync_mark_cnt;
8022 int scale, recovery_active;
NeilBrown4588b422006-03-27 01:18:04 -08008023 unsigned int per_milli;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008024
NeilBrownc804cde2012-05-21 09:28:33 +10008025 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8026 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
NeilBrowndd71cf62009-05-07 12:49:35 +10008027 max_sectors = mddev->resync_max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008028 else
NeilBrowndd71cf62009-05-07 12:49:35 +10008029 max_sectors = mddev->dev_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008030
NeilBrownf7851be2015-07-02 17:12:58 +10008031 resync = mddev->curr_resync;
8032 if (resync <= 3) {
8033 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8034 /* Still cleaning up */
8035 resync = max_sectors;
Nate Daileyd2e2ec82017-11-30 11:33:30 -05008036 } else if (resync > max_sectors)
8037 resync = max_sectors;
8038 else
NeilBrownf7851be2015-07-02 17:12:58 +10008039 resync -= atomic_read(&mddev->recovery_active);
8040
8041 if (resync == 0) {
Guoqing Jiang0357ba22018-07-02 16:26:25 +08008042 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8043 struct md_rdev *rdev;
8044
8045 rdev_for_each(rdev, mddev)
8046 if (rdev->raid_disk >= 0 &&
8047 !test_bit(Faulty, &rdev->flags) &&
8048 rdev->recovery_offset != MaxSector &&
8049 rdev->recovery_offset) {
8050 seq_printf(seq, "\trecover=REMOTE");
8051 return 1;
8052 }
8053 if (mddev->reshape_position != MaxSector)
8054 seq_printf(seq, "\treshape=REMOTE");
8055 else
8056 seq_printf(seq, "\tresync=REMOTE");
8057 return 1;
8058 }
NeilBrownf7851be2015-07-02 17:12:58 +10008059 if (mddev->recovery_cp < MaxSector) {
8060 seq_printf(seq, "\tresync=PENDING");
8061 return 1;
8062 }
8063 return 0;
8064 }
8065 if (resync < 3) {
8066 seq_printf(seq, "\tresync=DELAYED");
8067 return 1;
8068 }
8069
NeilBrown403df472014-09-30 15:52:29 +10008070 WARN_ON(max_sectors == 0);
NeilBrown4588b422006-03-27 01:18:04 -08008071 /* Pick 'scale' such that (resync>>scale)*1000 will fit
NeilBrowndd71cf62009-05-07 12:49:35 +10008072 * in a sector_t, and (max_sectors>>scale) will fit in a
NeilBrown4588b422006-03-27 01:18:04 -08008073 * u32, as those are the requirements for sector_div.
8074 * Thus 'scale' must be at least 10
8075 */
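	/*
	 * Worked example: a 16 TiB component device has
	 * max_sectors = 2^35, and 2^35 >> 10 = 2^25 fits a u32 with
	 * room to spare, so scale stays at 10; the loop below only
	 * bumps it for (hypothetical) multi-PiB devices.
	 */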
8076 scale = 10;
8077 if (sizeof(sector_t) > sizeof(unsigned long)) {
NeilBrowndd71cf62009-05-07 12:49:35 +10008078 while ( max_sectors/2 > (1ULL<<(scale+32)))
NeilBrown4588b422006-03-27 01:18:04 -08008079 scale++;
8080 }
8081 res = (resync>>scale)*1000;
NeilBrowndd71cf62009-05-07 12:49:35 +10008082 sector_div(res, (u32)((max_sectors>>scale)+1));
NeilBrown4588b422006-03-27 01:18:04 -08008083
8084 per_milli = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008085 {
NeilBrown4588b422006-03-27 01:18:04 -08008086 int i, x = per_milli/50, y = 20-x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008087 seq_printf(seq, "[");
8088 for (i = 0; i < x; i++)
8089 seq_printf(seq, "=");
8090 seq_printf(seq, ">");
8091 for (i = 0; i < y; i++)
8092 seq_printf(seq, ".");
8093 seq_printf(seq, "] ");
8094 }
NeilBrown4588b422006-03-27 01:18:04 -08008095 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
NeilBrownccfcc3c2006-03-27 01:18:09 -08008096 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8097 "reshape" :
NeilBrown61df9d92006-10-03 01:15:57 -07008098 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8099 "check" :
8100 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8101 "resync" : "recovery"))),
8102 per_milli/10, per_milli % 10,
NeilBrowndd71cf62009-05-07 12:49:35 +10008103 (unsigned long long) resync/2,
8104 (unsigned long long) max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008105
8106 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07008107 * dt: time from mark until now
8108 * db: blocks written from mark until now
8109 * rt: remaining time
NeilBrowndd71cf62009-05-07 12:49:35 +10008110 *
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008111 * rt is a sector_t, which is always 64bit now. We are keeping
8112 * the original algorithm, but it is not really necessary.
8113 *
8114 * Original algorithm:
8115 * So we divide before multiply in case it is 32bit and close
8116 * to the limit.
8117 * We scale the divisor (db) by 32 to avoid losing precision
8118 * near the end of resync when the number of remaining sectors
8119 * is close to 'db'.
8120 * We then divide rt by 32 after multiplying by db to compensate.
8121 * The '+1' avoids division by zero if db is very small.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008122 */
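	/*
	 * Worked example (made-up numbers): dt = 1000s since the mark
	 * and db = 102400000 sectors written in that time (50 MiB/s),
	 * with 2048000000 sectors still to go, gives
	 * rt = ((2048000000 / (db/32 + 1)) * 1000) >> 5 = 19968s
	 * (the ideal answer is 20000s; the /32 scaling and the +1
	 * cost a little precision), printed as finish=332.8min.
	 */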
8123 dt = ((jiffies - mddev->resync_mark) / HZ);
8124 if (!dt) dt++;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008125
8126 curr_mark_cnt = mddev->curr_mark_cnt;
8127 recovery_active = atomic_read(&mddev->recovery_active);
8128 resync_mark_cnt = mddev->resync_mark_cnt;
8129
8130 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8131 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008132
NeilBrowndd71cf62009-05-07 12:49:35 +10008133 rt = max_sectors - resync; /* number of remaining sectors */
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008134 rt = div64_u64(rt, db/32+1);
NeilBrowndd71cf62009-05-07 12:49:35 +10008135 rt *= dt;
8136 rt >>= 5;

	seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
		   ((unsigned long)rt % 60)/6);

	seq_printf(seq, " speed=%ldK/sec", db/2/dt);
	return 1;
}

static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct list_head *tmp;
	loff_t l = *pos;
	struct mddev *mddev;

	if (l == 0x10000) {
		++*pos;
		return (void *)2;
	}
	if (l > 0x10000)
		return NULL;
	if (!l--)
		/* header */
		return (void *)1;

	spin_lock(&all_mddevs_lock);
	list_for_each(tmp, &all_mddevs)
		if (!l--) {
			mddev = list_entry(tmp, struct mddev, all_mddevs);
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			return mddev;
		}
	spin_unlock(&all_mddevs_lock);
	if (!l--)
		return (void *)2; /* tail */
	return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct list_head *tmp;
	struct mddev *next_mddev, *mddev = v;

	++*pos;
	if (v == (void *)2)
		return NULL;

	spin_lock(&all_mddevs_lock);
	if (v == (void *)1)
		tmp = all_mddevs.next;
	else
		tmp = mddev->all_mddevs.next;
	if (tmp != &all_mddevs)
		next_mddev = mddev_get(list_entry(tmp, struct mddev, all_mddevs));
	else {
		next_mddev = (void *)2;
		*pos = 0x10000;
	}
	spin_unlock(&all_mddevs_lock);

	if (v != (void *)1)
		mddev_put(mddev);
	return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;

	if (mddev && v != (void *)1 && v != (void *)2)
		mddev_put(mddev);
}
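
/*
 * Note on the iterator above: besides the real mddevs, md_seq_start()
 * and md_seq_next() hand out two pseudo records, (void *)1 for the
 * leading "Personalities :" header and (void *)2 for the trailing
 * "unused devices" line, and park *pos at 0x10000 once the tail has
 * been emitted so that a rewound or restarted iteration still
 * terminates cleanly.
 */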

static int md_seq_show(struct seq_file *seq, void *v)
{
	struct mddev *mddev = v;
	sector_t sectors;
	struct md_rdev *rdev;

	if (v == (void *)1) {
		struct md_personality *pers;

		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		seq->poll_event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void *)2) {
		status_unused(seq);
		return 0;
	}

	spin_lock(&mddev->lock);
	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro == 1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro == 2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		sectors = 0;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev) {
			char b[BDEVNAME_SIZE];

			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev, b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Journal, &rdev->flags))
				seq_printf(seq, "(J)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			}
			if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			if (test_bit(Replacement, &rdev->flags))
				seq_printf(seq, "(R)");
			sectors += rdev->sectors;
		}
		rcu_read_unlock();

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)
					   mddev->array_sectors / 2);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)sectors / 2);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq, " super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status(seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (status_resync(seq, mddev))
					seq_printf(seq, "\n      ");
			}
		} else
			seq_printf(seq, "\n      ");

		md_bitmap_status(seq, mddev->bitmap);

		seq_printf(seq, "\n");
	}
	spin_unlock(&mddev->lock);

	return 0;
}
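
/*
 * For orientation, the handlers above produce /proc/mdstat output along
 * these lines (array name, devices and all numbers are made up for
 * illustration, and the resync line is wrapped here for comment width):
 *
 *	Personalities : [raid1] [raid6] [raid5] [raid4]
 *	md0 : active raid1 sdb1[1] sda1[0]
 *	      976630464 blocks super 1.2 [2/2] [UU]
 *	      [===>.................]  resync = 16.1% (157286400/976630464)
 *	      finish=16.6min speed=102400K/sec
 *
 *	unused devices: <none>
 */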

static const struct seq_operations md_seq_ops = {
	.start = md_seq_start,
	.next  = md_seq_next,
	.stop  = md_seq_stop,
	.show  = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int error;

	error = seq_open(file, &md_seq_ops);
	if (error)
		return error;

	seq = file->private_data;
	seq->poll_event = atomic_read(&md_event_count);
	return error;
}

static int md_unloading;
static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *seq = filp->private_data;
	__poll_t mask;

	if (md_unloading)
		return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = EPOLLIN | EPOLLRDNORM;

	if (seq->poll_event != atomic_read(&md_event_count))
		mask |= EPOLLERR | EPOLLPRI;
	return mask;
}

static const struct proc_ops mdstat_proc_ops = {
	.proc_open	= md_seq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
	.proc_poll	= mdstat_poll,
};
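
/*
 * Minimal user-space sketch (not part of the driver) of how the
 * proc_poll hook above is typically consumed, e.g. by a monitoring
 * daemon: read /proc/mdstat once to latch the event counter, then
 * poll() for POLLPRI and re-read from offset 0 on every wakeup.
 * Error handling is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[8192];
 *		int fd = open("/proc/mdstat", O_RDONLY);
 *		struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *		if (fd < 0)
 *			return 1;
 *		read(fd, buf, sizeof(buf));
 *		while (poll(&pfd, 1, -1) > 0) {
 *			lseek(fd, 0, SEEK_SET);
 *			read(fd, buf, sizeof(buf));
 *		}
 *		return 0;
 *	}
 */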

int register_md_personality(struct md_personality *p)
{
	pr_debug("md: %s personality registered for level %d\n",
		 p->name, p->level);
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(register_md_personality);

int unregister_md_personality(struct md_personality *p)
{
	pr_debug("md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_personality);

int register_md_cluster_operations(struct md_cluster_operations *ops,
				   struct module *module)
{
	int ret = 0;

	spin_lock(&pers_lock);
	if (md_cluster_ops != NULL)
		ret = -EALREADY;
	else {
		md_cluster_ops = ops;
		md_cluster_mod = module;
	}
	spin_unlock(&pers_lock);
	return ret;
}
EXPORT_SYMBOL(register_md_cluster_operations);

int unregister_md_cluster_operations(void)
{
	spin_lock(&pers_lock);
	md_cluster_ops = NULL;
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_cluster_operations);

int md_setup_cluster(struct mddev *mddev, int nodes)
{
	int ret;

	if (!md_cluster_ops)
		request_module("md-cluster");
	spin_lock(&pers_lock);
	/* ensure the module won't be unloaded */
	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
		pr_warn("can't find the md-cluster module or get its reference.\n");
		spin_unlock(&pers_lock);
		return -ENOENT;
	}
	spin_unlock(&pers_lock);

	ret = md_cluster_ops->join(mddev, nodes);
	if (!ret)
		mddev->safemode_delay = 0;
	return ret;
}

void md_cluster_stop(struct mddev *mddev)
{
	if (!md_cluster_ops)
		return;
	md_cluster_ops->leave(mddev);
	module_put(md_cluster_mod);
}

static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_disk;

		curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_events to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there are few or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
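
/*
 * Note on the 64-sector slack above: curr_events is "sectors completed
 * by the disk minus sectors started for resync", so pure resync traffic
 * keeps it roughly constant (slightly negative while requests are in
 * flight), while any other I/O pushes it up.  Letting it drift by up to
 * 64 sectors (32 KiB) per check avoids throttling resync on accounting
 * jitter alone.
 */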

void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" worth of 512-byte blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		/* stop recovery, signal do_sync ... */
	}
}
EXPORT_SYMBOL(md_done_sync);

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. the 'active' flag
 * in the superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 * A return value of 'false' means that the write wasn't recorded
 * and cannot proceed as the array is being suspended.
 */
bool md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;

	if (bio_data_dir(bi) != WRITE)
		return true;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	rcu_read_lock();
	percpu_ref_get(&mddev->writes_pending);
	smp_mb(); /* Match smp_mb in set_in_sync() */
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
	if (mddev->in_sync || mddev->sync_checkers) {
		spin_lock(&mddev->lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock(&mddev->lock);
	}
	rcu_read_unlock();
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	if (!mddev->has_superblocks)
		return true;
	wait_event(mddev->sb_wait,
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
		   mddev->suspended);
	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		percpu_ref_put(&mddev->writes_pending);
		return false;
	}
	return true;
}
EXPORT_SYMBOL(md_write_start);

/* md_write_inc can only be called when md_write_start() has
 * already been called at least once for the current request.
 * It increments the counter and is useful when a single request
 * is split into several parts.  Each part causes an increment and
 * so needs a matching md_write_end().
 * Unlike md_write_start(), it is safe to call md_write_inc() inside
 * a spinlocked region.
 */
void md_write_inc(struct mddev *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;
	WARN_ON_ONCE(mddev->in_sync || mddev->ro);
	percpu_ref_get(&mddev->writes_pending);
}
EXPORT_SYMBOL(md_write_inc);

void md_write_end(struct mddev *mddev)
{
	percpu_ref_put(&mddev->writes_pending);

	if (mddev->safemode == 2)
		md_wakeup_thread(mddev->thread);
	else if (mddev->safemode_delay)
		/* The roundup() ensures this only performs locking once
		 * every ->safemode_delay jiffies
		 */
		mod_timer(&mddev->safemode_timer,
			  roundup(jiffies, mddev->safemode_delay) +
			  mddev->safemode_delay);
}
EXPORT_SYMBOL(md_write_end);
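
/*
 * Typical pairing of the three helpers above in a personality's
 * make_request path, shown as an illustrative sketch only (nothing
 * named "toy_" exists in the tree):
 *
 *	static bool toy_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (!md_write_start(mddev, bio))
 *			return false;		(array is being suspended)
 *		... split the bio as needed, calling md_write_inc() for
 *		    each extra fragment and arranging for every fragment's
 *		    endio to call md_write_end() ...
 *		return true;
 *	}
 */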

/* This is used by raid0 and raid10 */
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
			   struct bio *bio, sector_t start, sector_t size)
{
	struct bio *discard_bio = NULL;

	if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0,
				   &discard_bio) || !discard_bio)
		return;

	bio_chain(discard_bio, bio);
	bio_clone_blkg_association(discard_bio, bio);
	if (mddev->gendisk)
		trace_block_bio_remap(discard_bio,
				      disk_devt(mddev->gendisk),
				      bio->bi_iter.bi_sector);
	submit_bio_noacct(discard_bio);
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);

static void md_end_io_acct(struct bio *bio)
{
	struct md_io_acct *md_io_acct = bio->bi_private;
	struct bio *orig_bio = md_io_acct->orig_bio;

	orig_bio->bi_status = bio->bi_status;

	bio_end_io_acct(orig_bio, md_io_acct->start_time);
	bio_put(bio);
	bio_endio(orig_bio);
}

/*
 * Used by personalities that don't already clone the bio and thus can't
 * easily add the timestamp to their extended bio structure.
 */
void md_account_bio(struct mddev *mddev, struct bio **bio)
{
	struct md_io_acct *md_io_acct;
	struct bio *clone;

	if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
		return;

	clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
	md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
	md_io_acct->orig_bio = *bio;
	md_io_acct->start_time = bio_start_io_acct(*bio);

	clone->bi_end_io = md_end_io_acct;
	clone->bi_private = md_io_acct;
	*bio = clone;
}
EXPORT_SYMBOL_GPL(md_account_bio);
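
/*
 * Usage note (illustrative): a personality that wants per-array I/O
 * accounting simply calls
 *
 *	md_account_bio(mddev, &bio);
 *	submit_bio_noacct(bio);
 *
 * When io_stat is enabled on the queue, "bio" is transparently replaced
 * by a clone whose completion handler (md_end_io_acct above) closes the
 * accounting window and then completes the original bio.
 */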

/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return;
	if (mddev->ro)
		return;
	if (!mddev->pers->sync_request)
		return;

	spin_lock(&mddev->lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock(&mddev->lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		/* wait for the dirty state to be recorded in the metadata */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	} else
		spin_unlock(&mddev->lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);

#define SYNC_MARKS	10
#define SYNC_MARK_STEP	(3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct mddev *mddev2;
	unsigned int currspeed = 0, window;
	sector_t max_sectors, j, io_sectors, recovery_done;
	unsigned long mark[SYNC_MARKS];
	unsigned long update_time;
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct md_rdev *rdev;
	char *desc, *action = NULL;
	struct blk_plug plug;
	int ret;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
		return;
	if (mddev->ro) { /* never try to sync a read-only array */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
	}

	if (mddev_is_clustered(mddev)) {
		ret = md_cluster_ops->resync_start(mddev);
		if (ret)
			goto skip;

		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
		      test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
		      test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
		    && ((unsigned long long)mddev->curr_resync_completed
			< (unsigned long long)mddev->resync_max_sectors))
			goto skip;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
			desc = "data-check";
			action = "check";
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			desc = "requested-resync";
			action = "repair";
		} else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	mddev->last_sync_action = action ?: desc;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow a conflicting resync to
	 *	commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on the address of
	 * the mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */

	do {
		int mddev2_minor = -1;

		mddev->curr_resync = 2;

	try_again:
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);

				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					if (mddev2_minor != mddev2->md_minor) {
						mddev2_minor = mddev2->md_minor;
						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
							desc, mdname(mddev),
							mdname(mddev2));
					}
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to the physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
		max_sectors = mddev->resync_max_sectors;
		/*
		 * If the original node aborts reshaping then we continue the
		 * reshaping, so set j again to avoid restarting the reshape
		 * from the very beginning
		 */
		if (mddev_is_clustered(mddev) &&
		    mddev->reshape_position != MaxSector)
			j = mddev->reshape_position;
	} else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Journal, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();

		/* If there is a bitmap, we need to make sure all
		 * writes that started before we added a spare
		 * complete before we start doing a recovery.
		 * Otherwise the write might complete and (via
		 * bitmap_endwrite) set a bit in the bitmap after the
		 * recovery has checked that bit and skipped that
		 * region.
		 */
		if (mddev->bitmap) {
			mddev->pers->quiesce(mddev, 1);
			mddev->pers->quiesce(mddev, 0);
		}
	}

	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
	pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
		 speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32 * (PAGE_SIZE / 512);
	pr_debug("md: using %dk window, over a total of %lluk.\n",
		 window/2, (unsigned long long)max_sectors/2);
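	/*
	 * With 4 KiB pages the window above is 32 * 8 = 256 sectors
	 * (128 KiB), so the pr_debug line reports a "128k window".
	 */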

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j > 2) {
		pr_debug("md: resuming %s of %s from checkpoint.\n",
			 desc, mdname(mddev));
		mddev->curr_resync = j;
	} else
		mddev->curr_resync = 3; /* no longer delayed */
	mddev->curr_resync_completed = j;
	sysfs_notify_dirent_safe(mddev->sysfs_completed);
	md_new_event();
	update_time = jiffies;

	blk_start_plug(&plug);
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed ||
		     mddev->curr_resync_completed > mddev->resync_max
			    )) {
			/* time to update curr_resync_completed */
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed = j;
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
			    j > mddev->recovery_cp)
				mddev->recovery_cp = j;
			update_time = jiffies;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			sysfs_notify_dirent_safe(mddev->sysfs_completed);
		}

		while (j >= mddev->resync_max &&
		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || test_bit(MD_RECOVERY_INTR,
							     &mddev->recovery));
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		sectors = mddev->pers->sync_request(mddev, j, &skipped);
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			break;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		j += sectors;
		if (j > max_sectors)
			/* when skipping, extra large numbers can be returned. */
			j = max_sectors;
		if (j > 2)
			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event();

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		/*
		 * this loop exits only if we are slower than the 'hard'
		 * speed limit, or the system was IO-idle for a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem (things like an
		 * e2fsck being done on the RAID array should execute fast).
		 */
		cond_resched();

		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;
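		/*
		 * Example (illustrative numbers only): 9000 sectors completed
		 * since the current mark, 3 seconds after it, give
		 * currspeed = 9000/2 / (3+1) + 1 = 1126 KB/sec, which is then
		 * compared against the sysctl speed limits below.
		 */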

		if (currspeed > speed_min(mddev)) {
			if (currspeed > speed_max(mddev)) {
				msleep(500);
				goto repeat;
			}
			if (!is_mddev_idle(mddev, 0)) {
				/*
				 * Give other IO more of a chance.
				 * The faster the devices, the less we wait.
				 */
				wait_event(mddev->recovery_wait,
					   !atomic_read(&mddev->recovery_active));
			}
		}
	}
	pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
		? "interrupted" : "done");
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
	blk_finish_plug(&plug);
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    mddev->curr_resync > 3) {
		mddev->curr_resync_completed = mddev->curr_resync;
		sysfs_notify_dirent_safe(mddev->sysfs_completed);
	}
	mddev->pers->sync_request(mddev, max_sectors, &skipped);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 3) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					pr_debug("md: checkpointing %s of %s.\n",
						 desc, mdname(mddev));
					if (test_bit(MD_RECOVERY_ERROR,
						     &mddev->recovery))
						mddev->recovery_cp =
							mddev->curr_resync_completed;
					else
						mddev->recovery_cp =
							mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
				rcu_read_lock();
				rdev_for_each_rcu(rdev, mddev)
					if (rdev->raid_disk >= 0 &&
					    mddev->delta_disks >= 0 &&
					    !test_bit(Journal, &rdev->flags) &&
					    !test_bit(Faulty, &rdev->flags) &&
					    !test_bit(In_sync, &rdev->flags) &&
					    rdev->recovery_offset < mddev->curr_resync)
						rdev->recovery_offset = mddev->curr_resync;
				rcu_read_unlock();
			}
		}
	}
 skip:
	/* Set CHANGE_PENDING here since another update may be needed,
	 * so other nodes are informed.  It should be harmless for normal
	 * raid */
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    mddev->delta_disks > 0 &&
	    mddev->pers->finish_reshape &&
	    mddev->pers->size &&
	    mddev->queue) {
		mddev_lock_nointr(mddev);
		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
		mddev_unlock(mddev);
		if (!mddev_is_clustered(mddev))
			set_capacity_and_notify(mddev->gendisk,
						mddev->array_sectors);
	}

	spin_lock(&mddev->lock);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	mddev->curr_resync = 0;
	spin_unlock(&mddev->lock);

	wake_up(&resync_wait);
	md_wakeup_thread(mddev->thread);
	return;
}
EXPORT_SYMBOL_GPL(md_do_sync);

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this)
{
	struct md_rdev *rdev;
	int spares = 0;
	int removed = 0;
	bool remove_some = false;

	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		/* Mustn't remove devices when the resync thread is running */
		return 0;

	rdev_for_each(rdev, mddev) {
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    test_bit(Faulty, &rdev->flags) &&
		    atomic_read(&rdev->nr_pending) == 0) {
			/* Faulty non-Blocked devices with nr_pending == 0
			 * never get nr_pending incremented,
			 * never get Faulty cleared, and never get Blocked set.
			 * So we can synchronize_rcu now rather than once per device
			 */
			remove_some = true;
			set_bit(RemoveSynchronized, &rdev->flags);
		}
	}

	if (remove_some)
		synchronize_rcu();
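
	/*
	 * Second pass: with all RCU readers now guaranteed to see
	 * RemoveSynchronized, actually hand the marked (and any other
	 * removable) devices to ->hot_remove_disk().
	 */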
	rdev_for_each(rdev, mddev) {
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    ((test_bit(RemoveSynchronized, &rdev->flags) ||
		      (!test_bit(In_sync, &rdev->flags) &&
		       !test_bit(Journal, &rdev->flags))) &&
		     atomic_read(&rdev->nr_pending) == 0)) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev) == 0) {
				sysfs_unlink_rdev(mddev, rdev);
				rdev->saved_raid_disk = rdev->raid_disk;
				rdev->raid_disk = -1;
				removed++;
			}
		}
		if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
			clear_bit(RemoveSynchronized, &rdev->flags);
	}

	if (removed && mddev->kobj.sd)
		sysfs_notify_dirent_safe(mddev->sysfs_degraded);

	if (this && removed)
		goto no_add;

	rdev_for_each(rdev, mddev) {
		if (this && this != rdev)
			continue;
		if (test_bit(Candidate, &rdev->flags))
			continue;
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Journal, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0)
			continue;
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(Journal, &rdev->flags)) {
			if (mddev->ro &&
			    !(rdev->saved_raid_disk >= 0 &&
			      !test_bit(Bitmap_sync, &rdev->flags)))
				continue;

			rdev->recovery_offset = 0;
		}
		if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
			/* failure here is OK */
			sysfs_link_rdev(mddev, rdev);
			if (!test_bit(Journal, &rdev->flags))
				spares++;
			md_new_event();
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
no_add:
	if (removed)
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	return spares;
}

static void md_start_sync(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	mddev->sync_thread = md_register_thread(md_do_sync,
						mddev,
						"resync");
	if (!mddev->sync_thread) {
		pr_warn("%s: could not start resync thread...\n",
			mdname(mddev));
		/* leave the spares where they are, it shouldn't hurt */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		wake_up(&resync_wait);
		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
				       &mddev->recovery))
			if (mddev->sysfs_action)
				sysfs_notify_dirent_safe(mddev->sysfs_action);
	} else
		md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event();
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ If the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If the array is degraded, try to add spare devices.
 *  6/ If the array has spares or is not in-sync, start a resync thread.
 */
NeilBrownfd01b882011-10-11 16:47:53 +11009230void md_check_recovery(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009231{
NeilBrown059421e2018-10-03 15:04:41 +10009232 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9233 /* Write superblock - thread that called mddev_suspend()
9234 * holds reconfig_mutex for us.
9235 */
9236 set_bit(MD_UPDATING_SB, &mddev->flags);
9237 smp_mb__after_atomic();
9238 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9239 md_update_sb(mddev, 0);
9240 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9241 wake_up(&mddev->sb_wait);
9242 }
9243
Jonathan Brassow68866e42011-06-08 15:10:08 +10009244 if (mddev->suspended)
9245 return;
9246
NeilBrown5f404022005-06-21 17:17:16 -07009247 if (mddev->bitmap)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009248 md_bitmap_daemon_work(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009249
NeilBrownfca4d842005-06-21 17:17:11 -07009250 if (signal_pending(current)) {
NeilBrown31a59e32008-04-30 00:52:30 -07009251 if (mddev->pers->sync_request && !mddev->external) {
NeilBrown9d487392016-11-02 14:16:49 +11009252 pr_debug("md: %s in immediate safe mode\n",
9253 mdname(mddev));
NeilBrownfca4d842005-06-21 17:17:11 -07009254 mddev->safemode = 2;
9255 }
9256 flush_signals(current);
9257 }
9258
NeilBrownc89a8ee2008-08-05 15:54:13 +10009259 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9260 return;
	if (!(
		(mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;
		bool try_set_sync = mddev->safemode != 0;

		if (!mddev->external && mddev->safemode == 1)
			mddev->safemode = 0;

		if (mddev->ro) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)
				/* 'Blocked' flag not needed as failed devices
				 * will be recorded if array switched to read/write.
				 * Leaving it set will prevent the device
				 * from being removed.
				 */
				rdev_for_each(rdev, mddev)
					clear_bit(Blocked, &rdev->flags);
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			remove_and_add_spares(mddev, NULL);
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			goto unlock;
		}

		if (mddev_is_clustered(mddev)) {
			struct md_rdev *rdev, *tmp;
			/* Kick the device if another node issued a
			 * remove-disk request for it.
			 */
			rdev_for_each_safe(rdev, tmp, mddev) {
				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
						rdev->raid_disk < 0)
					md_kick_rdev_from_array(rdev);
			}
		}

		if (try_set_sync && !mddev->external && !mddev->in_sync) {
			spin_lock(&mddev->lock);
			set_in_sync(mddev);
			spin_unlock(&mddev->lock);
		}

		if (mddev->sb_flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		mddev->curr_resync_completed = 0;
		spin_lock(&mddev->lock);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		spin_unlock(&mddev->lock);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto not_running;
		/* No recovery is running.
		 * Remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto not_running;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto not_running;

		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				md_bitmap_write_all(mddev->bitmap);
			}
			INIT_WORK(&mddev->del_work, md_start_sync);
			queue_work(md_misc_wq, &mddev->del_work);
			goto unlock;
		}
	not_running:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			wake_up(&resync_wait);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
	unlock:
		wake_up(&mddev->sb_wait);
		mddev_unlock(mddev);
	}
}
EXPORT_SYMBOL(md_check_recovery);

void md_reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;
	sector_t old_dev_sectors = mddev->dev_sectors;
	bool is_reshaped = false;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
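	/* Spares are only activated when the sync ran to completion (not
	 * interrupted), was a real resync/recovery rather than a
	 * user-requested check/repair, and the array is not entirely dead.
	 */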
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    mddev->degraded != mddev->raid_disks) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev)) {
			sysfs_notify_dirent_safe(mddev->sysfs_degraded);
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape) {
		mddev->pers->finish_reshape(mddev);
		if (mddev_is_clustered(mddev))
			is_reshaped = true;
	}

	/* If the array is no longer degraded, then any saved_raid_disk
	 * information must be scrapped.
	 */
	if (!mddev->degraded)
		rdev_for_each(rdev, mddev)
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
	 * clustered raid */
	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
		md_cluster_ops->resync_finish(mddev);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	/*
	 * We call md_cluster_ops->update_size here because sync_size could
	 * have been changed by md_update_sb, and MD_RECOVERY_RESHAPE is
	 * cleared, so it is time to update the size across the cluster.
	 */
	if (mddev_is_clustered(mddev) && is_reshaped
				      && !test_bit(MD_CLOSING, &mddev->flags))
		md_cluster_ops->update_size(mddev, old_dev_sectors);
	wake_up(&resync_wait);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event();
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

void md_finish_reshape(struct mddev *mddev)
{
	/* called by the personality module when a reshape completes. */
	struct md_rdev *rdev;

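	/* Grow or shrink each device's usable range by the distance the
	 * data was shifted, then commit the new data offset.
	 */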
	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management */

/* Returns 1 on success, 0 on failure */
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	struct mddev *mddev = rdev->mddev;
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
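	/* Note the inverted convention: badblocks_set() returns 0 on
	 * success, while this wrapper returns 1 on success.
	 */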
	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
	if (rv == 0) {
		/* Make sure they get written out promptly */
		if (test_bit(ExternalBbl, &rdev->flags))
			sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(rdev->mddev->thread);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_clear(&rdev->badblocks, s, sectors);
	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
		sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
	if (!md_rdev_misc_wq)
		goto err_rdev_misc_wq;

	ret = __register_blkdev(MD_MAJOR, "md", md_probe);
	if (ret < 0)
		goto err_md;

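	/* Passing major 0 asks the block layer for a dynamically
	 * allocated major number, which is returned on success.
	 */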
	ret = __register_blkdev(0, "mdp", md_probe);
	if (ret < 0)
		goto err_mdp;
	mdp_major = ret;

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_rdev_misc_wq);
err_rdev_misc_wq:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	struct md_rdev *rdev2, *tmp;
	int role, ret;
	char b[BDEVNAME_SIZE];

	/*
	 * If the size was changed on another node then we need to
	 * do the resize as well.
	 */
	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
		if (ret)
			pr_info("md-cluster: resize failed\n");
		else
			md_bitmap_update_sb(mddev->bitmap);
	}

	/* Check for change of roles in the active devices */
	rdev_for_each_safe(rdev2, tmp, mddev) {
		if (test_bit(Faulty, &rdev2->flags))
			continue;

		/* Check if the roles changed */
		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
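		/* On-disk role values: 0xffff = spare, 0xfffe = faulty,
		 * 0xfffd = journal (see MD_DISK_ROLE_*).
		 */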

		if (test_bit(Candidate, &rdev2->flags)) {
			if (role == 0xfffe) {
				pr_info("md: Removing Candidate device %s because add failed\n",
					bdevname(rdev2->bdev, b));
				md_kick_rdev_from_array(rdev2);
				continue;
			} else
				clear_bit(Candidate, &rdev2->flags);
		}

		if (role != rdev2->raid_disk) {
			/*
			 * The device got activated, unless a reshape is
			 * currently happening.
			 */
			if (rdev2->raid_disk == -1 && role != 0xffff &&
			    !(le32_to_cpu(sb->feature_map) &
			      MD_FEATURE_RESHAPE_ACTIVE)) {
				rdev2->saved_raid_disk = role;
				ret = remove_and_add_spares(mddev, rdev2);
				pr_info("Activated spare: %s\n",
					bdevname(rdev2->bdev, b));
				/* Wake up mddev->thread here, so the array
				 * can resync with the newly activated disk */
				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
				md_wakeup_thread(mddev->thread);
			}
			/* device faulty
			 * We just want to do the minimum to mark the disk
			 * as faulty. The recovery is performed by the
			 * one who initiated the error.
			 */
			if ((role == 0xfffe) || (role == 0xfffd)) {
				md_error(mddev, rdev2);
				clear_bit(Blocked, &rdev2->flags);
			}
		}
	}

	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
		ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
		if (ret)
			pr_warn("md: updating array disks failed. %d\n", ret);
	}

	/*
	 * Since mddev->delta_disks has already been updated in
	 * update_raid_disks, it is time to check for a reshape.
	 */
	if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/*
		 * reshape is happening in the remote node, we need to
		 * update reshape_position and call start_reshape.
		 */
		mddev->reshape_position = le64_to_cpu(sb->reshape_position);
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
		if (mddev->pers->start_reshape)
			mddev->pers->start_reshape(mddev);
	} else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
		   mddev->reshape_position != MaxSector &&
		   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
		/* reshape is just done in another node. */
		mddev->reshape_position = MaxSector;
		if (mddev->pers->update_reshape_pos)
			mddev->pers->update_reshape_pos(mddev);
	}

	/* Finally set the event to be up to date */
	mddev->events = le64_to_cpu(sb->events);
}

static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	int err;
	struct page *swapout = rdev->sb_page;
	struct mdp_superblock_1 *sb;

	/* Store the sb page of the rdev in the swapout temporary
	 * variable in case we err in the future
	 */
	rdev->sb_page = NULL;
	err = alloc_disk_sb(rdev);
	if (err == 0) {
		ClearPageUptodate(rdev->sb_page);
		rdev->sb_loaded = 0;
		err = super_types[mddev->major_version].
			load_super(rdev, NULL, mddev->minor_version);
	}
	if (err < 0) {
		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
			__func__, __LINE__, rdev->desc_nr, err);
		if (rdev->sb_page)
			put_page(rdev->sb_page);
		rdev->sb_page = swapout;
		rdev->sb_loaded = 1;
		return err;
	}

	sb = page_address(rdev->sb_page);
	/* recovery_offset is only valid when MD_FEATURE_RECOVERY_OFFSET
	 * is set in the feature map.
	 */
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

	/* The other node finished recovery, call spare_active to set
	 * device In_sync and mddev->degraded
	 */
	if (rdev->recovery_offset == MaxSector &&
	    !test_bit(In_sync, &rdev->flags) &&
	    mddev->pers->spare_active(mddev))
		sysfs_notify_dirent_safe(mddev->sysfs_degraded);

	put_page(swapout);
	return 0;
}

void md_reload_sb(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;
	int err;

	/* Find the rdev */
	rdev_for_each_rcu(rdev, mddev) {
		if (rdev->desc_nr == nr)
			break;
	}

	if (!rdev || rdev->desc_nr != nr) {
		pr_warn("%s: %d Could not find rdev with nr %d\n",
			__func__, __LINE__, nr);
		return;
	}

	err = read_rdev(mddev, rdev);
	if (err < 0)
		return;

	check_sb_changes(mddev, rdev);

	/* Read all rdev's to update recovery_offset */
	rdev_for_each_rcu(rdev, mddev) {
		if (!test_bit(Faulty, &rdev->flags))
			read_rdev(mddev, rdev);
	}
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		mutex_lock(&detected_devices_mutex);
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
		mutex_unlock(&detected_devices_mutex);
	}
}

void md_autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	pr_info("md: Autodetecting RAID arrays.\n");

	mutex_lock(&detected_devices_mutex);
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
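		/* Drop the mutex across md_import_device(), which can
		 * block; holding it across the call risks a deadlock.
		 */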
		mutex_unlock(&detected_devices_mutex);
		rdev = md_import_device(dev, 0, 90);
		mutex_lock(&detected_devices_mutex);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}
	mutex_unlock(&detected_devices_mutex);

	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;
	int delay = 1;

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);

	/* We cannot unload the modules while some process is
	 * waiting for us in select() or poll() - wake them up
	 */
	md_unloading = 1;
	while (waitqueue_active(&md_event_waiters)) {
		/* not safe to leave yet */
		wake_up(&md_event_waiters);
		msleep(delay);
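		/* double the wait each time round: simple exponential
		 * backoff while stragglers drain
		 */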
		delay += delay;
	}
	remove_proc_entry("mdstat", NULL);

	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->ctime = 0;
		mddev->hold_active = 0;
		/*
		 * for_each_mddev() will call mddev_put() at the end of each
		 * iteration.  As the mddev is now fully clear, this will
		 * schedule the mddev for destruction by a workqueue, and the
		 * destroy_workqueue() below will wait for that to complete.
		 */
	}
	destroy_workqueue(md_rdev_misc_wq);
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%d\n", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
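/*
 * Example (illustrative only): setting start_ro=1, e.g. via
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 * makes newly assembled arrays come up in auto-read-only mode until
 * the first write arrives.
 */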

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);