// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/
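/*
 * For example (illustrative only, not a message emitted by this file): a
 * device whose metadata does not match the rest of the array would warrant
 *
 *	pr_warn("md: %s has incompatible metadata\n", bdevname(rdev->bdev, b));
 *
 * whereas an unexpected superblock read failure is reported with pr_err(),
 * as read_disk_sb() below does.
 */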

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

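/*
 * Illustrative sketch of how these helpers are consumed: the resync loop
 * in md_do_sync() throttles itself roughly like
 *
 *	if (currspeed > speed_min(mddev) &&
 *	    (currspeed > speed_max(mddev) || !is_mddev_idle(mddev, 0)))
 *		msleep(500);
 *
 * i.e. once above the guaranteed minimum, it backs off whenever the
 * maximum is exceeded or other IO is active (a paraphrase, not the
 * verbatim md_do_sync() code).
 */
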
static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * rdev needs to enable serialized IO if it meets both conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resource for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			      bool is_suspend)
{
	int ret = 0;

	if (rdev && !rdev_need_serial(rdev) &&
	    !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (!is_suspend)
		mddev_suspend(mddev);

	if (!rdev)
		ret = rdevs_init_serial(mddev);
	else
		ret = rdev_init_serial(rdev);
	if (ret)
		goto abort;

	if (mddev->serial_info_pool == NULL) {
		/*
		 * already in memalloc noio context by
		 * mddev_suspend()
		 */
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		if (!mddev->serial_info_pool) {
			rdevs_uninit_serial(mddev);
			pr_err("can't alloc memory pool for serialization\n");
		}
	}

abort:
	if (!is_suspend)
		mddev_resume(mddev);
}

/*
 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. when bitmap is destroyed while policy is not enabled.
 * 3. for disable policy, the pool is destroyed only when no rdev needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			       bool is_suspend)
{
	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0; /* used to track if other rdevs need the pool */

		if (!is_suspend)
			mddev_suspend(mddev);
		rdev_for_each(temp, mddev) {
			if (!rdev) {
				if (!mddev->serialize_policy ||
				    !rdev_need_serial(temp))
					rdev_uninit_serial(temp);
				else
					num++;
			} else if (temp != rdev &&
				   test_bit(CollisionCheck, &temp->flags))
				num++;
		}

		if (rdev)
			rdev_uninit_serial(rdev);

		if (num)
			pr_info("The mempool could be used by other devices\n");
		else {
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
		}
		if (!is_suspend)
			mddev_resume(mddev);
	}
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it. This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(void)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
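
/*
 * Typical use (a sketch mirroring callers later in this file, e.g. the
 * reboot notifier):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		md_wakeup_thread(mddev->thread);
 *
 * The macro drops all_mddevs_lock while the loop body runs, holding a
 * reference on the current mddev, and re-takes the lock to advance.
 */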

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		/* Bail out if REQ_NOWAIT is set for the bio */
		if (bio->bi_opf & REQ_NOWAIT) {
			rcu_read_unlock();
			bio_wouldblock_error(bio);
			return;
		}
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

static void md_submit_bio(struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return;
	}

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return;
	}

	blk_queue_split(&bio);

	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	/* entered the memalloc scope from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
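
/*
 * The expected pairing (a sketch; cf. mddev_create_serial_pool() above):
 * callers quiesce the array around a reconfiguration while holding
 * reconfig_mutex:
 *
 *	mddev_suspend(mddev);
 *	... reconfigure structures that the IO path may touch ...
 *	mddev_resume(mddev);
 */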

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to avoid a
	 * deadlock, because other bios passed md_handle_request suspend check
	 * could wait for this and below md_handle_request could wait for those
	 * bios because of suspend check
	 */
	spin_lock_irq(&mddev->lock);
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	spin_unlock_irq(&mddev->lock);
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}

/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
 * being finished in another context. Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t req_start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	/* flush requests wait until ongoing flush completes,
	 * hence coalescing all the pending requests.
	 */
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_before(req_start, mddev->prev_flush_start),
			    mddev->lock);
	/* new request after previous flush is completed */
	if (ktime_after(req_start, mddev->prev_flush_start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
	}
	spin_unlock_irq(&mddev->lock);

	if (!bio) {
		INIT_WORK(&mddev->flush_work, submit_flushes);
		queue_work(md_wq, &mddev->flush_work);
	} else {
		/* flush was performed for some other bio while we waited. */
		if (bio->bi_iter.bi_size == 0)
			/* an empty barrier - all done */
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(md_flush_request);
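
/*
 * Illustrative call pattern (a sketch of how personalities use this,
 * cf. the raid1/raid10 make_request implementations):
 *
 *	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *	    && md_flush_request(mddev, bio))
 *		return true;	/- flush handled, bio finished elsewhere -/
 *
 * A false return means the flush part is already complete and the caller
 * must still process the data portion of the bio itself.
 */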

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	kobject_init(&mddev->kobj, &md_ktype);
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find_locked(dev_t unit)
{
	struct mddev *mddev;

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit)
			return mddev;

	return NULL;
}

/* find an unused unit number */
static dev_t mddev_alloc_unit(void)
{
	static int next_minor = 512;
	int start = next_minor;
	bool is_free = false;
	dev_t dev = 0;

	while (!is_free) {
		dev = MKDEV(MD_MAJOR, next_minor);
		next_minor++;
		if (next_minor > MINORMASK)
			next_minor = 0;
		if (next_minor == start)
			return 0;		/* Oh dear, all in use. */
		is_free = !mddev_find_locked(dev);
	}

	return dev;
}

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev;

	if (MAJOR(unit) != MD_MAJOR)
		unit &= ~((1 << MdpMinorShift) - 1);

	spin_lock(&all_mddevs_lock);
	mddev = mddev_find_locked(unit);
	if (mddev)
		mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	return mddev;
}

static struct mddev *mddev_alloc(dev_t unit)
{
	struct mddev *new;
	int error;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1 << MdpMinorShift) - 1);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);
	mddev_init(new);

	spin_lock(&all_mddevs_lock);
	if (unit) {
		error = -EEXIST;
		if (mddev_find_locked(unit))
			goto out_free_new;
		new->unit = unit;
		if (MAJOR(unit) == MD_MAJOR)
			new->md_minor = MINOR(unit);
		else
			new->md_minor = MINOR(unit) >> MdpMinorShift;
		new->hold_active = UNTIL_IOCTL;
	} else {
		error = -ENODEV;
		new->unit = mddev_alloc_unit();
		if (!new->unit)
			goto out_free_new;
		new->md_minor = MINOR(new->unit);
		new->hold_active = UNTIL_STOP;
	}

	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);
	return new;
out_free_new:
	spin_unlock(&all_mddevs_lock);
	kfree(new);
	return ERR_PTR(error);
}

static const struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So hold sysfs_active set while the remove is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		const struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: %s gets error=%d\n", __func__,
		       blk_status_to_errno(bio->bi_status));
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

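/*
 * These two are used as a pair (a sketch of the superblock update path,
 * cf. md_update_sb() elsewhere in this file):
 *
 *	md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	if (md_super_wait(mddev) < 0)
 *		... repeat the writes: a failfast write needs a rewrite ...
 *
 * md_super_wait() returns -EAGAIN when a failfast superblock write failed
 * and MD_SB_NEED_REWRITE was set by super_written().
 */
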
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio bio;
	struct bio_vec bvec;

	bio_init(&bio, &bvec, 1);

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(&bio, rdev->meta_bdev);
	else
		bio_set_dev(&bio, rdev->bdev);
	bio.bi_opf = op | op_flags;
	if (metadata_op)
		bio.bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio.bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(&bio, page, size, 0);

	submit_bio_wait(&bio);

	return !bio.bi_status;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001115/*
1116 * Handle superblock details.
1117 * We want to be able to handle multiple superblock formats
1118 * so we have a common interface to them all, and an array of
1119 * different handlers.
1120 * We rely on user-space to write the initial superblock, and support
1121 * reading and updating of superblocks.
1122 * Interface methods are:
NeilBrown3cb03002011-10-11 16:45:26 +11001123 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001124 * loads and validates a superblock on dev.
1125 * if refdev != NULL, compare superblocks on both devices
1126 * Return:
1127 * 0 - dev has a superblock that is compatible with refdev
1128 * 1 - dev has a superblock that is compatible and newer than refdev
1129 * so dev should be used as the refdev in future
1130 * -EINVAL superblock incompatible or invalid
1131 * -othererror e.g. -EIO
1132 *
NeilBrownfd01b882011-10-11 16:47:53 +11001133 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134 * Verify that dev is acceptable into mddev.
1135 * The first time, mddev->raid_disks will be 0, and data from
1136 * dev should be merged in. Subsequent calls check that dev
1137 * is new enough. Return 0 or -EINVAL
1138 *
NeilBrownfd01b882011-10-11 16:47:53 +11001139 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001140 * Update the superblock for rdev with data in mddev
1141 * This does not write to disc.
1142 *
1143 */
1144
1145struct super_type {
Chris Webb0cd17fe2008-06-28 08:31:46 +10001146 char *name;
1147 struct module *owner;
NeilBrownc6563a82012-05-21 09:27:00 +10001148 int (*load_super)(struct md_rdev *rdev,
1149 struct md_rdev *refdev,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001150 int minor_version);
NeilBrownc6563a82012-05-21 09:27:00 +10001151 int (*validate_super)(struct mddev *mddev,
1152 struct md_rdev *rdev);
1153 void (*sync_super)(struct mddev *mddev,
1154 struct md_rdev *rdev);
NeilBrown3cb03002011-10-11 16:45:26 +11001155 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
Andre Noll15f4a5f2008-07-21 14:42:12 +10001156 sector_t num_sectors);
NeilBrownc6563a82012-05-21 09:27:00 +10001157 int (*allow_new_offset)(struct md_rdev *rdev,
1158 unsigned long long new_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159};
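/*
 * Handlers of this type are collected in super_types[] (defined further
 * down) and indexed by mddev->major_version, so call sites can dispatch
 * without caring which on-disk format is in use. A sketch of the shape
 * of such a call (not a verbatim quote of any one call site):
 *
 *	err = super_types[mddev->major_version].load_super(rdev, refdev,
 *							   minor_version);
 */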
1160
1161/*
Andre Noll0894cc32009-06-18 08:49:23 +10001162 * Check that the given mddev has no bitmap.
1163 *
1164 * This function is called from the run method of all personalities that do not
1165 * support bitmaps. It prints an error message and returns non-zero if mddev
1166 * has a bitmap. Otherwise, it returns 0.
1167 *
1168 */
NeilBrownfd01b882011-10-11 16:47:53 +11001169int md_check_no_bitmap(struct mddev *mddev)
Andre Noll0894cc32009-06-18 08:49:23 +10001170{
NeilBrownc3d97142009-12-14 12:49:52 +11001171 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
Andre Noll0894cc32009-06-18 08:49:23 +10001172 return 0;
NeilBrown9d487392016-11-02 14:16:49 +11001173 pr_warn("%s: bitmaps are not supported for %s\n",
Andre Noll0894cc32009-06-18 08:49:23 +10001174 mdname(mddev), mddev->pers->name);
1175 return 1;
1176}
1177EXPORT_SYMBOL(md_check_no_bitmap);
1178
1179/*
NeilBrownf72ffdd2014-09-30 14:23:59 +10001180 * load_super for 0.90.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 */
NeilBrown3cb03002011-10-11 16:45:26 +11001182static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183{
1184 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1185 mdp_super_t *sb;
1186 int ret;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001187 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188
1189 /*
Andre Noll0f420352008-07-11 22:02:23 +10001190 * Calculate the position of the superblock (in 512-byte sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191 * it's at the end of the disk.
1192 *
1193 * It also happens to be a multiple of 4KB.
1194 */
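	/*
	 * Illustration (assuming the usual 64K reservation, i.e.
	 * MD_RESERVED_SECTORS == 128): calc_dev_sboffset() rounds the
	 * device size down to a 64K boundary and steps back one
	 * reservation, e.g. a 1000000-sector device gives
	 *   (1000000 & ~127) - 128 = 999936 - 128 = 999808
	 * which is 96K from the end and 4K-aligned, as promised above.
	 */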
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001195 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196
NeilBrown0002b272005-09-09 16:23:53 -07001197 ret = read_disk_sb(rdev, MD_SB_BYTES);
NeilBrown9d487392016-11-02 14:16:49 +11001198 if (ret)
1199 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200
1201 ret = -EINVAL;
1202
1203 bdevname(rdev->bdev, b);
Namhyung Kim65a06f062011-07-27 11:00:36 +10001204 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205
1206 if (sb->md_magic != MD_SB_MAGIC) {
NeilBrown9d487392016-11-02 14:16:49 +11001207 pr_warn("md: invalid raid superblock magic on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208 goto abort;
1209 }
1210
1211 if (sb->major_version != 0 ||
NeilBrownf6705572006-03-27 01:18:11 -08001212 sb->minor_version < 90 ||
1213 sb->minor_version > 91) {
NeilBrown9d487392016-11-02 14:16:49 +11001214 pr_warn("Bad version number %d.%d on %s\n",
1215 sb->major_version, sb->minor_version, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216 goto abort;
1217 }
1218
1219 if (sb->raid_disks <= 0)
1220 goto abort;
1221
NeilBrown4d167f02007-05-09 02:35:37 -07001222 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
NeilBrown9d487392016-11-02 14:16:49 +11001223 pr_warn("md: invalid superblock checksum on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 goto abort;
1225 }
1226
1227 rdev->preferred_minor = sb->md_minor;
1228 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10001229 rdev->new_data_offset = 0;
NeilBrown0002b272005-09-09 16:23:53 -07001230 rdev->sb_size = MD_SB_BYTES;
NeilBrown9f2f3832011-07-28 11:31:47 +10001231 rdev->badblocks.shift = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232
1233 if (sb->level == LEVEL_MULTIPATH)
1234 rdev->desc_nr = -1;
1235 else
1236 rdev->desc_nr = sb->this_disk.number;
1237
Yufen Yu228fc7d2019-10-30 18:47:02 +08001238 /* not spare disk, or LEVEL_MULTIPATH */
1239 if (sb->level == LEVEL_MULTIPATH ||
1240 (rdev->desc_nr >= 0 &&
Yufen Yu3b7436c2019-12-10 15:01:29 +08001241 rdev->desc_nr < MD_SB_DISKS &&
Yufen Yu228fc7d2019-10-30 18:47:02 +08001242 sb->disks[rdev->desc_nr].state &
1243 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1244 spare_disk = false;
1245
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001246 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001247 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001248 ret = 1;
1249 else
1250 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001251 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001253 mdp_super_t *refsb = page_address(refdev->sb_page);
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001254 if (!md_uuid_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001255 pr_warn("md: %s has different UUID to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256 b, bdevname(refdev->bdev,b2));
1257 goto abort;
1258 }
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001259 if (!md_sb_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001260 pr_warn("md: %s has same UUID but different superblock to %s\n",
1261 b, bdevname(refdev->bdev, b2));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 goto abort;
1263 }
1264 ev1 = md_event(sb);
1265 ev2 = md_event(refsb);
Yufen Yu6a5cb532019-10-16 16:00:03 +08001266
Yufen Yu228fc7d2019-10-30 18:47:02 +08001267 if (!spare_disk && ev1 > ev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268 ret = 1;
NeilBrownf72ffdd2014-09-30 14:23:59 +10001269 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 ret = 0;
1271 }
NeilBrown8190e752009-06-18 08:48:58 +10001272 rdev->sectors = rdev->sb_start;
NeilBrown667a5312012-08-16 16:46:12 +10001273 /* Limit to 4TB as metadata cannot record more than that.
1274 * (not needed for Linear and RAID0 as metadata doesn't
1275 * record this size)
1276 */
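	/* arithmetic check: (2ULL << 32) sectors * 512 bytes = 2^42 bytes = 4TB */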
Christoph Hellwig72deb452019-04-05 18:08:59 +02001277 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001278 rdev->sectors = (sector_t)(2ULL << 32) - 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279
NeilBrown27a7b262011-09-10 17:21:28 +10001280 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
NeilBrown2bf071b2006-01-06 00:20:55 -08001281 /* "this cannot possibly happen" ... */
1282 ret = -EINVAL;
1283
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 abort:
1285 return ret;
1286}
1287
1288/*
1289 * validate_super for 0.90.0
1290 */
NeilBrownfd01b882011-10-11 16:47:53 +11001291static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292{
1293 mdp_disk_t *desc;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001294 mdp_super_t *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001295 __u64 ev1 = md_event(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
NeilBrown41158c72005-06-21 17:17:25 -07001297 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001298 clear_bit(Faulty, &rdev->flags);
1299 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001300 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001301 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001302
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 if (mddev->raid_disks == 0) {
1304 mddev->major_version = 0;
1305 mddev->minor_version = sb->minor_version;
1306 mddev->patch_version = sb->patch_version;
NeilBrowne6910632008-02-06 01:39:51 -08001307 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001308 mddev->chunk_sectors = sb->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 mddev->ctime = sb->ctime;
1310 mddev->utime = sb->utime;
1311 mddev->level = sb->level;
NeilBrownd9d166c2006-01-06 00:20:51 -08001312 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313 mddev->layout = sb->layout;
1314 mddev->raid_disks = sb->raid_disks;
NeilBrown27a7b262011-09-10 17:21:28 +10001315 mddev->dev_sectors = ((sector_t)sb->size) * 2;
NeilBrown07d84d102006-06-26 00:27:56 -07001316 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001317 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001318 mddev->bitmap_info.space = 0;
1319 /* bitmap can use 60K after the 4K superblock */
NeilBrownc3d97142009-12-14 12:49:52 +11001320 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001321 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrown2c810cd2012-05-21 09:27:00 +10001322 mddev->reshape_backwards = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
NeilBrownf6705572006-03-27 01:18:11 -08001324 if (mddev->minor_version >= 91) {
1325 mddev->reshape_position = sb->reshape_position;
1326 mddev->delta_disks = sb->delta_disks;
1327 mddev->new_level = sb->new_level;
1328 mddev->new_layout = sb->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001329 mddev->new_chunk_sectors = sb->new_chunk >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001330 if (mddev->delta_disks < 0)
1331 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001332 } else {
1333 mddev->reshape_position = MaxSector;
1334 mddev->delta_disks = 0;
1335 mddev->new_level = mddev->level;
1336 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001337 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001338 }
NeilBrown33f2c352019-09-09 16:52:29 +10001339 if (mddev->level == 0)
1340 mddev->layout = -1;
NeilBrownf6705572006-03-27 01:18:11 -08001341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 if (sb->state & (1<<MD_SB_CLEAN))
1343 mddev->recovery_cp = MaxSector;
1344 else {
NeilBrownf72ffdd2014-09-30 14:23:59 +10001345 if (sb->events_hi == sb->cp_events_hi &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 sb->events_lo == sb->cp_events_lo) {
1347 mddev->recovery_cp = sb->recovery_cp;
1348 } else
1349 mddev->recovery_cp = 0;
1350 }
1351
1352 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1353 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1354 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1355 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1356
1357 mddev->max_disks = MD_SB_DISKS;
NeilBrowna654b9d82005-06-21 17:17:27 -07001358
1359 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001360 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001361 mddev->bitmap_info.offset =
1362 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10001363 mddev->bitmap_info.space =
Dave Jonesc9ad0202013-08-19 22:26:32 -04001364 mddev->bitmap_info.default_space;
NeilBrown6409bb02012-05-22 13:55:07 +10001365 }
NeilBrowna654b9d82005-06-21 17:17:27 -07001366
NeilBrown41158c72005-06-21 17:17:25 -07001367 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001368 /* Insist on a good event counter while assembling, except
1369 * for spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001371 if (sb->disks[rdev->desc_nr].state & (
1372 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
NeilBrownf72ffdd2014-09-30 14:23:59 +10001373 if (ev1 < mddev->events)
NeilBrownbe6800a2010-05-18 10:17:09 +10001374 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001375 } else if (mddev->bitmap) {
1376 /* if adding to array with a bitmap, then we can accept an
1377 * older device ... but not too old.
1378 */
NeilBrown41158c72005-06-21 17:17:25 -07001379 if (ev1 < mddev->bitmap->events_cleared)
1380 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001381 if (ev1 < mddev->events)
1382 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001383 } else {
1384 if (ev1 < mddev->events)
1385 /* just a hot-add of a new device, leave raid_disk at -1 */
1386 return 0;
1387 }
NeilBrown41158c72005-06-21 17:17:25 -07001388
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 if (mddev->level != LEVEL_MULTIPATH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 desc = sb->disks + rdev->desc_nr;
1391
1392 if (desc->state & (1<<MD_DISK_FAULTY))
NeilBrownb2d444d2005-11-08 21:39:31 -08001393 set_bit(Faulty, &rdev->flags);
NeilBrown7c7546c2006-06-26 00:27:41 -07001394 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1395 desc->raid_disk < mddev->raid_disks */) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001396 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 rdev->raid_disk = desc->raid_disk;
NeilBrownf4667222013-12-09 12:04:56 +11001398 rdev->saved_raid_disk = desc->raid_disk;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001399 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1400 /* active but not in sync implies recovery up to
1401 * reshape position. We don't know exactly where
1402 * that is, so set to zero for now */
1403 if (mddev->minor_version >= 91) {
1404 rdev->recovery_offset = 0;
1405 rdev->raid_disk = desc->raid_disk;
1406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001408 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1409 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001410 if (desc->state & (1<<MD_DISK_FAILFAST))
1411 set_bit(FailFast, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001412 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001413 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 return 0;
1415}
1416
1417/*
1418 * sync_super for 0.90.0
1419 */
NeilBrownfd01b882011-10-11 16:47:53 +11001420static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421{
1422 mdp_super_t *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001423 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 int next_spare = mddev->raid_disks;
NeilBrown19133a42005-11-08 21:39:35 -08001425
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 /* make rdev->sb match mddev data.
1427 *
1428 * 1/ zero out disks
1429 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1430 * 3/ any empty disks < next_spare become removed
1431 *
1432 * disks[0] gets initialised to REMOVED because
1433 * we cannot be sure from other fields if it has
1434 * been initialised or not.
1435 */
1436 int i;
1437 int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;
1438
NeilBrown61181562005-09-09 16:24:02 -07001439 rdev->sb_size = MD_SB_BYTES;
1440
Namhyung Kim65a06f062011-07-27 11:00:36 +10001441 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442
1443 memset(sb, 0, sizeof(*sb));
1444
1445 sb->md_magic = MD_SB_MAGIC;
1446 sb->major_version = mddev->major_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 sb->patch_version = mddev->patch_version;
1448 sb->gvalid_words = 0; /* ignored */
1449 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1450 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1451 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1452 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1453
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001454 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 sb->level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11001456 sb->size = mddev->dev_sectors / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457 sb->raid_disks = mddev->raid_disks;
1458 sb->md_minor = mddev->md_minor;
NeilBrowne6910632008-02-06 01:39:51 -08001459 sb->not_persistent = 0;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001460 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 sb->state = 0;
1462 sb->events_hi = (mddev->events>>32);
1463 sb->events_lo = (u32)mddev->events;
1464
NeilBrownf6705572006-03-27 01:18:11 -08001465 if (mddev->reshape_position == MaxSector)
1466 sb->minor_version = 90;
1467 else {
1468 sb->minor_version = 91;
1469 sb->reshape_position = mddev->reshape_position;
1470 sb->new_level = mddev->new_level;
1471 sb->delta_disks = mddev->delta_disks;
1472 sb->new_layout = mddev->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001473 sb->new_chunk = mddev->new_chunk_sectors << 9;
NeilBrownf6705572006-03-27 01:18:11 -08001474 }
1475 mddev->minor_version = sb->minor_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 if (mddev->in_sync)
1477 {
1478 sb->recovery_cp = mddev->recovery_cp;
1479 sb->cp_events_hi = (mddev->events>>32);
1480 sb->cp_events_lo = (u32)mddev->events;
1481 if (mddev->recovery_cp == MaxSector)
1482 sb->state = (1<< MD_SB_CLEAN);
1483 } else
1484 sb->recovery_cp = 0;
1485
1486 sb->layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10001487 sb->chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488
NeilBrownc3d97142009-12-14 12:49:52 +11001489 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
NeilBrowna654b9d82005-06-21 17:17:27 -07001490 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1491
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 sb->disks[0].state = (1<<MD_DISK_REMOVED);
NeilBrowndafb20f2012-03-19 12:46:39 +11001493 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 mdp_disk_t *d;
NeilBrown86e6ffd2005-11-08 21:39:24 -08001495 int desc_nr;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001496 int is_active = test_bit(In_sync, &rdev2->flags);
1497
1498 if (rdev2->raid_disk >= 0 &&
1499 sb->minor_version >= 91)
1500 /* we have nowhere to store the recovery_offset,
1501 * but if it is not below the reshape_position,
1502 * we can piggy-back on that.
1503 */
1504 is_active = 1;
1505 if (rdev2->raid_disk < 0 ||
1506 test_bit(Faulty, &rdev2->flags))
1507 is_active = 0;
1508 if (is_active)
NeilBrown86e6ffd2005-11-08 21:39:24 -08001509 desc_nr = rdev2->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 else
NeilBrown86e6ffd2005-11-08 21:39:24 -08001511 desc_nr = next_spare++;
NeilBrown19133a42005-11-08 21:39:35 -08001512 rdev2->desc_nr = desc_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 d = &sb->disks[rdev2->desc_nr];
1514 nr_disks++;
1515 d->number = rdev2->desc_nr;
1516 d->major = MAJOR(rdev2->bdev->bd_dev);
1517 d->minor = MINOR(rdev2->bdev->bd_dev);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001518 if (is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 d->raid_disk = rdev2->raid_disk;
1520 else
1521 d->raid_disk = rdev2->desc_nr; /* compatibility */
NeilBrown1be78922006-03-27 01:18:03 -08001522 if (test_bit(Faulty, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 d->state = (1<<MD_DISK_FAULTY);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001524 else if (is_active) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 d->state = (1<<MD_DISK_ACTIVE);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001526 if (test_bit(In_sync, &rdev2->flags))
1527 d->state |= (1<<MD_DISK_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 active++;
1529 working++;
1530 } else {
1531 d->state = 0;
1532 spare++;
1533 working++;
1534 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001535 if (test_bit(WriteMostly, &rdev2->flags))
1536 d->state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11001537 if (test_bit(FailFast, &rdev2->flags))
1538 d->state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 /* now set the "removed" and "faulty" bits on any missing devices */
1541 for (i=0 ; i < mddev->raid_disks ; i++) {
1542 mdp_disk_t *d = &sb->disks[i];
1543 if (d->state == 0 && d->number == 0) {
1544 d->number = i;
1545 d->raid_disk = i;
1546 d->state = (1<<MD_DISK_REMOVED);
1547 d->state |= (1<<MD_DISK_FAULTY);
1548 failed++;
1549 }
1550 }
1551 sb->nr_disks = nr_disks;
1552 sb->active_disks = active;
1553 sb->working_disks = working;
1554 sb->failed_disks = failed;
1555 sb->spare_disks = spare;
1556
1557 sb->this_disk = sb->disks[rdev->desc_nr];
1558 sb->sb_csum = calc_sb_csum(sb);
1559}
1560
1561/*
Chris Webb0cd17fe2008-06-28 08:31:46 +10001562 * rdev_size_change for 0.90.0
1563 */
1564static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11001565super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001566{
Andre Noll58c0fed2009-03-31 14:33:13 +11001567 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001568 return 0; /* component must fit device */
NeilBrownc3d97142009-12-14 12:49:52 +11001569 if (rdev->mddev->bitmap_info.offset)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001570 return 0; /* can't move bitmap */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001571 rdev->sb_start = calc_dev_sboffset(rdev);
Andre Noll15f4a5f2008-07-21 14:42:12 +10001572 if (!num_sectors || num_sectors > rdev->sb_start)
1573 num_sectors = rdev->sb_start;
NeilBrown27a7b262011-09-10 17:21:28 +10001574 /* Limit to 4TB as metadata cannot record more than that.
1575 * 4TB == 2^32 KB, or 2*2^32 sectors.
1576 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001577 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001578 num_sectors = (sector_t)(2ULL << 32) - 2;
NeilBrown46533ff2016-11-18 16:16:11 +11001579 do {
1580 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001581 rdev->sb_page);
NeilBrown46533ff2016-11-18 16:16:11 +11001582 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11001583 return num_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10001584}
1585
NeilBrownc6563a82012-05-21 09:27:00 +10001586static int
1587super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1588{
1589 /* non-zero offset changes not possible with v0.90 */
1590 return new_offset == 0;
1591}
Chris Webb0cd17fe2008-06-28 08:31:46 +10001592
1593/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 * version 1 superblock
1595 */
1596
NeilBrownf72ffdd2014-09-30 14:23:59 +10001597static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598{
NeilBrown1c05b4b2006-10-21 10:24:08 -07001599 __le32 disk_csum;
1600 u32 csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 unsigned long long newcsum;
1602 int size = 256 + le32_to_cpu(sb->max_dev)*2;
NeilBrown1c05b4b2006-10-21 10:24:08 -07001603 __le32 *isuper = (__le32*)sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
1605 disk_csum = sb->sb_csum;
1606 sb->sb_csum = 0;
1607 newcsum = 0;
NeilBrown1f3c9902012-12-11 13:09:00 +11001608 for (; size >= 4; size -= 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609 newcsum += le32_to_cpu(*isuper++);
1610
1611 if (size == 2)
NeilBrown1c05b4b2006-10-21 10:24:08 -07001612 newcsum += le16_to_cpu(*(__le16*) isuper);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
1614 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1615 sb->sb_csum = disk_csum;
1616 return cpu_to_le32(csum);
1617}
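/*
 * Worked example (illustrative value): with max_dev == 127 the
 * checksummed region is 256 + 127*2 = 510 bytes, so the u32 loop stops
 * with size == 2 and the final le16 load picks up the odd tail. Unlike
 * the 0.90 checksum, everything here is explicitly little-endian, which
 * is part of what makes v1 superblocks portable across architectures.
 */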
1618
NeilBrown3cb03002011-10-11 16:45:26 +11001619static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620{
1621 struct mdp_superblock_1 *sb;
1622 int ret;
Andre Noll0f420352008-07-11 22:02:23 +10001623 sector_t sb_start;
NeilBrownc6563a82012-05-21 09:27:00 +10001624 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown0002b272005-09-09 16:23:53 -07001626 int bmask;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001627 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
1629 /*
Andre Noll0f420352008-07-11 22:02:23 +10001630 * Calculate the position of the superblock in 512-byte sectors.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 * It is always aligned to a 4K boundary and
1632 * depending on minor_version, it can be:
1633 * 0: At least 8K, but less than 12K, from end of device
1634 * 1: At start of device
1635 * 2: 4K from start of device.
1636 */
1637 switch(minor_version) {
1638 case 0:
Christoph Hellwig0fe80342021-10-18 12:11:06 +02001639 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
Andre Noll0f420352008-07-11 22:02:23 +10001640 sb_start &= ~(sector_t)(4*2-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 break;
1642 case 1:
Andre Noll0f420352008-07-11 22:02:23 +10001643 sb_start = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 break;
1645 case 2:
Andre Noll0f420352008-07-11 22:02:23 +10001646 sb_start = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 break;
1648 default:
1649 return -EINVAL;
1650 }
Andre Noll0f420352008-07-11 22:02:23 +10001651 rdev->sb_start = sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
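	/*
	 * Worked example for minor_version 0 (illustrative size): a device
	 * of 1000003 sectors gives (1000003 - 16) & ~7 = 999984, putting
	 * the superblock 19 sectors (9.5K) from the end: at least 8K back,
	 * less than 12K, and on a 4K boundary, as the table above says.
	 */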
NeilBrown0002b272005-09-09 16:23:53 -07001653 /* superblock is rarely larger than 1K, but it can be larger,
1654 * and it is safe to read 4k, so we do that
1655 */
1656 ret = read_disk_sb(rdev, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 if (ret) return ret;
1658
Namhyung Kim65a06f062011-07-27 11:00:36 +10001659 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660
1661 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1662 sb->major_version != cpu_to_le32(1) ||
1663 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
Andre Noll0f420352008-07-11 22:02:23 +10001664 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
NeilBrown71c08052005-09-09 16:23:51 -07001665 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 return -EINVAL;
1667
1668 if (calc_sb_1_csum(sb) != sb->sb_csum) {
NeilBrown9d487392016-11-02 14:16:49 +11001669 pr_warn("md: invalid superblock checksum on %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 bdevname(rdev->bdev,b));
1671 return -EINVAL;
1672 }
1673 if (le64_to_cpu(sb->data_size) < 10) {
NeilBrown9d487392016-11-02 14:16:49 +11001674 pr_warn("md: data_size too small on %s\n",
1675 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 return -EINVAL;
1677 }
NeilBrownc6563a82012-05-21 09:27:00 +10001678 if (sb->pad0 ||
1679 sb->pad3[0] ||
1680 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1681 /* Some padding is non-zero, might be a new feature */
1682 return -EINVAL;
NeilBrowne11e93f2007-05-09 02:35:36 -07001683
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 rdev->preferred_minor = 0xffff;
1685 rdev->data_offset = le64_to_cpu(sb->data_offset);
NeilBrownc6563a82012-05-21 09:27:00 +10001686 rdev->new_data_offset = rdev->data_offset;
1687 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1688 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1689 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
NeilBrown4dbcdc72006-01-06 00:20:52 -08001690 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
NeilBrown0002b272005-09-09 16:23:53 -07001692 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
Martin K. Petersene1defc42009-05-22 17:17:49 -04001693 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
NeilBrown0002b272005-09-09 16:23:53 -07001694 if (rdev->sb_size & bmask)
NeilBrowna1801f82008-03-04 14:29:31 -08001695 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1696
1697 if (minor_version
Andre Noll0f420352008-07-11 22:02:23 +10001698 && rdev->data_offset < sb_start + (rdev->sb_size/512))
NeilBrowna1801f82008-03-04 14:29:31 -08001699 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10001700 if (minor_version
1701 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1702 return -EINVAL;
NeilBrown0002b272005-09-09 16:23:53 -07001703
NeilBrown31b65a02006-07-10 04:44:14 -07001704 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1705 rdev->desc_nr = -1;
1706 else
1707 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1708
NeilBrown2699b672011-07-28 11:31:47 +10001709 if (!rdev->bb_page) {
1710 rdev->bb_page = alloc_page(GFP_KERNEL);
1711 if (!rdev->bb_page)
1712 return -ENOMEM;
1713 }
1714 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1715 rdev->badblocks.count == 0) {
1716 /* need to load the bad block list.
1717 * Currently we limit it to one page.
1718 */
1719 s32 offset;
1720 sector_t bb_sector;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001721 __le64 *bbp;
NeilBrown2699b672011-07-28 11:31:47 +10001722 int i;
1723 int sectors = le16_to_cpu(sb->bblog_size);
1724 if (sectors > (PAGE_SIZE / 512))
1725 return -EINVAL;
1726 offset = le32_to_cpu(sb->bblog_offset);
1727 if (offset == 0)
1728 return -EINVAL;
1729 bb_sector = (long long)offset;
1730 if (!sync_page_io(rdev, bb_sector, sectors << 9,
Mike Christie796a5cf2016-06-05 14:32:07 -05001731 rdev->bb_page, REQ_OP_READ, 0, true))
NeilBrown2699b672011-07-28 11:31:47 +10001732 return -EIO;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001733 bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10001734 rdev->badblocks.shift = sb->bblog_shift;
1735 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1736 u64 bb = le64_to_cpu(*bbp);
1737 int count = bb & (0x3ff);
1738 u64 sector = bb >> 10;
1739 sector <<= sb->bblog_shift;
1740 count <<= sb->bblog_shift;
1741 if (bb + 1 == 0)
1742 break;
Vishal Vermafc974ee2015-12-24 19:20:34 -07001743 if (badblocks_set(&rdev->badblocks, sector, count, 1))
NeilBrown2699b672011-07-28 11:31:47 +10001744 return -EINVAL;
1745 }
NeilBrown486adf72013-04-24 11:42:44 +10001746 } else if (sb->bblog_offset != 0)
1747 rdev->badblocks.shift = 0;
NeilBrown2699b672011-07-28 11:31:47 +10001748
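	/*
	 * On-disk bad-block encoding, as decoded above: each __le64 packs
	 * the start sector in the upper 54 bits and the length in the low
	 * 10 bits, both scaled by bblog_shift. Illustrative entry (with
	 * bblog_shift == 0):
	 *   bb = (1000ULL << 10) | 8  ->  8 bad sectors starting at 1000
	 * An all-ones entry (bb + 1 == 0) terminates the list.
	 */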
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001749 if ((le32_to_cpu(sb->feature_map) &
1750 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001751 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1752 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1753 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1754 }
1755
NeilBrown33f2c352019-09-09 16:52:29 +10001756 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1757 sb->level != 0)
1758 return -EINVAL;
1759
Yufen Yu228fc7d2019-10-30 18:47:02 +08001760 /* not spare disk, or LEVEL_MULTIPATH */
1761 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1762 (rdev->desc_nr >= 0 &&
1763 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1764 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1765 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1766 spare_disk = false;
Yufen Yu6a5cb532019-10-16 16:00:03 +08001767
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001768 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001769 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001770 ret = 1;
1771 else
1772 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001773 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001775 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
1777 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1778 sb->level != refsb->level ||
1779 sb->layout != refsb->layout ||
1780 sb->chunksize != refsb->chunksize) {
NeilBrown9d487392016-11-02 14:16:49 +11001781 pr_warn("md: %s has strangely different superblock to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 bdevname(rdev->bdev,b),
1783 bdevname(refdev->bdev,b2));
1784 return -EINVAL;
1785 }
1786 ev1 = le64_to_cpu(sb->events);
1787 ev2 = le64_to_cpu(refsb->events);
1788
Yufen Yu228fc7d2019-10-30 18:47:02 +08001789 if (!spare_disk && ev1 > ev2)
NeilBrown8ed75462006-02-03 03:03:41 -08001790 ret = 1;
1791 else
1792 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 }
Christoph Hellwig0fe80342021-10-18 12:11:06 +02001794 if (minor_version)
1795 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1796 else
NeilBrownc6563a82012-05-21 09:27:00 +10001797 sectors = rdev->sb_start;
1798 if (sectors < le64_to_cpu(sb->data_size))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 return -EINVAL;
Andre Nolldd8ac332009-03-31 14:33:13 +11001800 rdev->sectors = le64_to_cpu(sb->data_size);
NeilBrown8ed75462006-02-03 03:03:41 -08001801 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802}
1803
NeilBrownfd01b882011-10-11 16:47:53 +11001804static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805{
Namhyung Kim65a06f062011-07-27 11:00:36 +10001806 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001807 __u64 ev1 = le64_to_cpu(sb->events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
NeilBrown41158c72005-06-21 17:17:25 -07001809 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001810 clear_bit(Faulty, &rdev->flags);
1811 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001812 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001813 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001814
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 if (mddev->raid_disks == 0) {
1816 mddev->major_version = 1;
1817 mddev->patch_version = 0;
NeilBrowne6910632008-02-06 01:39:51 -08001818 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001819 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001820 mddev->ctime = le64_to_cpu(sb->ctime);
1821 mddev->utime = le64_to_cpu(sb->utime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 mddev->level = le32_to_cpu(sb->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08001823 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 mddev->layout = le32_to_cpu(sb->layout);
1825 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001826 mddev->dev_sectors = le64_to_cpu(sb->size);
NeilBrown07d84d102006-06-26 00:27:56 -07001827 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001828 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001829 mddev->bitmap_info.space = 0;
1830 /* Default location for bitmap is 1K after superblock
1831 * using 3K - total of 4K
1832 */
NeilBrownc3d97142009-12-14 12:49:52 +11001833 mddev->bitmap_info.default_offset = 1024 >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001834 mddev->bitmap_info.default_space = (4096-1024) >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001835 mddev->reshape_backwards = 0;
1836
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1838 memcpy(mddev->uuid, sb->set_uuid, 16);
1839
1840 mddev->max_disks = (4096-256)/2;
NeilBrowna654b9d82005-06-21 17:17:27 -07001841
NeilBrown71c08052005-09-09 16:23:51 -07001842 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001843 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001844 mddev->bitmap_info.offset =
1845 (__s32)le32_to_cpu(sb->bitmap_offset);
NeilBrown6409bb02012-05-22 13:55:07 +10001846 /* Metadata doesn't record how much space is available.
1847 * For 1.0, we assume the bitmap can use the space up to the
1848 * superblock if it sits before it, else up to 4K beyond the superblock.
1849 * For others, assume no change is possible.
1850 */
1851 if (mddev->minor_version > 0)
1852 mddev->bitmap_info.space = 0;
1853 else if (mddev->bitmap_info.offset > 0)
1854 mddev->bitmap_info.space =
1855 8 - mddev->bitmap_info.offset;
1856 else
1857 mddev->bitmap_info.space =
1858 -mddev->bitmap_info.offset;
1859 }
NeilBrowne11e93f2007-05-09 02:35:36 -07001860
NeilBrownf6705572006-03-27 01:18:11 -08001861 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1862 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1863 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1864 mddev->new_level = le32_to_cpu(sb->new_level);
1865 mddev->new_layout = le32_to_cpu(sb->new_layout);
Andre Noll664e7c42009-06-18 08:45:27 +10001866 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
NeilBrown2c810cd2012-05-21 09:27:00 +10001867 if (mddev->delta_disks < 0 ||
1868 (mddev->delta_disks == 0 &&
1869 (le32_to_cpu(sb->feature_map)
1870 & MD_FEATURE_RESHAPE_BACKWARDS)))
1871 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001872 } else {
1873 mddev->reshape_position = MaxSector;
1874 mddev->delta_disks = 0;
1875 mddev->new_level = mddev->level;
1876 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001877 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001878 }
1879
NeilBrown33f2c352019-09-09 16:52:29 +10001880 if (mddev->level == 0 &&
1881 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1882 mddev->layout = -1;
1883
Song Liu486b0f72016-08-19 15:34:01 -07001884 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
Shaohua Lia62ab492016-01-06 14:37:13 -08001885 set_bit(MD_HAS_JOURNAL, &mddev->flags);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001886
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001887 if (le32_to_cpu(sb->feature_map) &
1888 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001889 if (le32_to_cpu(sb->feature_map) &
1890 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1891 return -EINVAL;
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001892 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1893 (le32_to_cpu(sb->feature_map) &
1894 MD_FEATURE_MULTIPLE_PPLS))
1895 return -EINVAL;
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001896 set_bit(MD_HAS_PPL, &mddev->flags);
1897 }
NeilBrown41158c72005-06-21 17:17:25 -07001898 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001899 /* Insist on a good event counter while assembling, except for
1900 * spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001902 if (rdev->desc_nr >= 0 &&
1903 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
Song Liua3dfbda2015-10-08 21:54:11 -07001904 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1905 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
NeilBrownbe6800a2010-05-18 10:17:09 +10001906 if (ev1 < mddev->events)
1907 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001908 } else if (mddev->bitmap) {
1909 /* If adding to array with a bitmap, then we can accept an
1910 * older device, but not too old.
1911 */
NeilBrown41158c72005-06-21 17:17:25 -07001912 if (ev1 < mddev->bitmap->events_cleared)
1913 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001914 if (ev1 < mddev->events)
1915 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001916 } else {
1917 if (ev1 < mddev->events)
1918 /* just a hot-add of a new device, leave raid_disk at -1 */
1919 return 0;
1920 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 if (mddev->level != LEVEL_MULTIPATH) {
1922 int role;
NeilBrown3673f302009-08-03 10:59:56 +10001923 if (rdev->desc_nr < 0 ||
1924 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
Song Liuc4d4c912015-08-13 14:31:54 -07001925 role = MD_DISK_ROLE_SPARE;
NeilBrown3673f302009-08-03 10:59:56 +10001926 rdev->desc_nr = -1;
1927 } else
1928 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
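		/*
		 * For reference (values taken from md_p.h): role words
		 * below MD_DISK_ROLE_MAX (0xff00) are raid slot numbers,
		 * while 0xffff is MD_DISK_ROLE_SPARE, 0xfffe is
		 * MD_DISK_ROLE_FAULTY and 0xfffd is MD_DISK_ROLE_JOURNAL.
		 */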
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929 switch(role) {
Song Liuc4d4c912015-08-13 14:31:54 -07001930 case MD_DISK_ROLE_SPARE: /* spare */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 break;
Song Liuc4d4c912015-08-13 14:31:54 -07001932 case MD_DISK_ROLE_FAULTY: /* faulty */
NeilBrownb2d444d2005-11-08 21:39:31 -08001933 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 break;
Song Liubac624f2015-08-13 14:31:55 -07001935 case MD_DISK_ROLE_JOURNAL: /* journal device */
1936 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1937 /* journal device without journal feature */
NeilBrown9d487392016-11-02 14:16:49 +11001938 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
Song Liubac624f2015-08-13 14:31:55 -07001939 return -EINVAL;
1940 }
1941 set_bit(Journal, &rdev->flags);
Shaohua Li3069aa82015-08-13 14:31:56 -07001942 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
Shaohua Li9b156032015-12-18 15:19:16 +11001943 rdev->raid_disk = 0;
Song Liubac624f2015-08-13 14:31:55 -07001944 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 default:
NeilBrownf4667222013-12-09 12:04:56 +11001946 rdev->saved_raid_disk = role;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001947 if ((le32_to_cpu(sb->feature_map) &
NeilBrownf4667222013-12-09 12:04:56 +11001948 MD_FEATURE_RECOVERY_OFFSET)) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001949 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11001950 if (!(le32_to_cpu(sb->feature_map) &
1951 MD_FEATURE_RECOVERY_BITMAP))
1952 rdev->saved_raid_disk = -1;
Guoqing Jiang062f5b2a2019-07-24 11:09:20 +02001953 } else {
1954 /*
1955 * If the array is FROZEN, then the device can't
1956 * be in_sync with rest of array.
1957 */
1958 if (!test_bit(MD_RECOVERY_FROZEN,
1959 &mddev->recovery))
1960 set_bit(In_sync, &rdev->flags);
1961 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 rdev->raid_disk = role;
1963 break;
1964 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001965 if (sb->devflags & WriteMostly1)
1966 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001967 if (sb->devflags & FailFast1)
1968 set_bit(FailFast, &rdev->flags);
NeilBrown2d78f8c2011-12-23 10:17:51 +11001969 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1970 set_bit(Replacement, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001971 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001972 set_bit(In_sync, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 return 0;
1975}
1976
NeilBrownfd01b882011-10-11 16:47:53 +11001977static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978{
1979 struct mdp_superblock_1 *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001980 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 int max_dev, i;
1982 /* make rdev->sb match mddev and rdev data. */
1983
Namhyung Kim65a06f062011-07-27 11:00:36 +10001984 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
1986 sb->feature_map = 0;
1987 sb->pad0 = 0;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001988 sb->recovery_offset = cpu_to_le64(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 memset(sb->pad3, 0, sizeof(sb->pad3));
1990
1991 sb->utime = cpu_to_le64((__u64)mddev->utime);
1992 sb->events = cpu_to_le64(mddev->events);
1993 if (mddev->in_sync)
1994 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
Shaohua Libd18f642015-09-02 13:49:50 -07001995 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1996 sb->resync_offset = cpu_to_le64(MaxSector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 else
1998 sb->resync_offset = cpu_to_le64(0);
1999
NeilBrown1c05b4b2006-10-21 10:24:08 -07002000 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
NeilBrown4dbcdc72006-01-06 00:20:52 -08002001
NeilBrownf0ca3402006-02-02 14:28:04 -08002002 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11002003 sb->size = cpu_to_le64(mddev->dev_sectors);
Andre Noll9d8f0362009-06-18 08:45:01 +10002004 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
NeilBrown62e1e382009-05-26 09:40:59 +10002005 sb->level = cpu_to_le32(mddev->level);
2006 sb->layout = cpu_to_le32(mddev->layout);
NeilBrown688834e2016-11-18 16:16:11 +11002007 if (test_bit(FailFast, &rdev->flags))
2008 sb->devflags |= FailFast1;
2009 else
2010 sb->devflags &= ~FailFast1;
NeilBrownf0ca3402006-02-02 14:28:04 -08002011
NeilBrownaeb9b2112011-08-25 14:43:08 +10002012 if (test_bit(WriteMostly, &rdev->flags))
2013 sb->devflags |= WriteMostly1;
2014 else
2015 sb->devflags &= ~WriteMostly1;
NeilBrownc6563a82012-05-21 09:27:00 +10002016 sb->data_offset = cpu_to_le64(rdev->data_offset);
2017 sb->data_size = cpu_to_le64(rdev->sectors);
NeilBrownaeb9b2112011-08-25 14:43:08 +10002018
NeilBrownc3d97142009-12-14 12:49:52 +11002019 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2020 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
NeilBrown71c08052005-09-09 16:23:51 -07002021 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
NeilBrowna654b9d82005-06-21 17:17:27 -07002022 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002023
Shaohua Lif2076e72015-10-08 21:54:12 -07002024 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
NeilBrown97e4f422009-03-31 14:33:13 +11002025 !test_bit(In_sync, &rdev->flags)) {
NeilBrown93be75f2009-12-14 12:50:06 +11002026 sb->feature_map |=
2027 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2028 sb->recovery_offset =
2029 cpu_to_le64(rdev->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11002030 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2031 sb->feature_map |=
2032 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002033 }
Shaohua Li3069aa82015-08-13 14:31:56 -07002034 /* Note: recovery_offset and journal_tail share space */
2035 if (test_bit(Journal, &rdev->flags))
2036 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002037 if (test_bit(Replacement, &rdev->flags))
2038 sb->feature_map |=
2039 cpu_to_le32(MD_FEATURE_REPLACEMENT);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002040
NeilBrownf6705572006-03-27 01:18:11 -08002041 if (mddev->reshape_position != MaxSector) {
2042 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2043 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2044 sb->new_layout = cpu_to_le32(mddev->new_layout);
2045 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2046 sb->new_level = cpu_to_le32(mddev->new_level);
Andre Noll664e7c42009-06-18 08:45:27 +10002047 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
NeilBrown2c810cd2012-05-21 09:27:00 +10002048 if (mddev->delta_disks == 0 &&
2049 mddev->reshape_backwards)
2050 sb->feature_map
2051 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
NeilBrownc6563a82012-05-21 09:27:00 +10002052 if (rdev->new_data_offset != rdev->data_offset) {
2053 sb->feature_map
2054 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2055 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2056 - rdev->data_offset));
2057 }
NeilBrownf6705572006-03-27 01:18:11 -08002058 }
NeilBrowna654b9d82005-06-21 17:17:27 -07002059
Goldwyn Rodrigues3c462c82015-08-19 07:35:54 +10002060 if (mddev_is_clustered(mddev))
2061 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2062
NeilBrown2699b672011-07-28 11:31:47 +10002063 if (rdev->badblocks.count == 0)
2064 /* Nothing to do for bad blocks*/ ;
2065 else if (sb->bblog_offset == 0)
2066 /* Cannot record bad blocks on this device */
2067 md_error(mddev, rdev);
2068 else {
2069 struct badblocks *bb = &rdev->badblocks;
Christoph Hellwigae506402019-04-04 18:56:13 +02002070 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10002071 u64 *p = bb->page;
2072 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2073 if (bb->changed) {
2074 unsigned seq;
2075
2076retry:
2077 seq = read_seqbegin(&bb->lock);
2078
2079 memset(bbp, 0xff, PAGE_SIZE);
2080
2081 for (i = 0 ; i < bb->count ; i++) {
majianpeng35f9ac22012-11-08 08:56:27 +08002082 u64 internal_bb = p[i];
NeilBrown2699b672011-07-28 11:31:47 +10002083 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2084 | BB_LEN(internal_bb));
majianpeng35f9ac22012-11-08 08:56:27 +08002085 bbp[i] = cpu_to_le64(store_bb);
NeilBrown2699b672011-07-28 11:31:47 +10002086 }
NeilBrownd0962932012-03-19 12:46:41 +11002087 bb->changed = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002088 if (read_seqretry(&bb->lock, seq))
2089 goto retry;
2090
2091 bb->sector = (rdev->sb_start +
2092 (int)le32_to_cpu(sb->bblog_offset));
2093 bb->size = le16_to_cpu(sb->bblog_size);
NeilBrown2699b672011-07-28 11:31:47 +10002094 }
2095 }
2096
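	/*
	 * The retry loop above is the classic seqlock reader pattern:
	 * snapshot the sequence, copy the protected data out, and redo
	 * the copy if a writer bumped the sequence meanwhile. A generic
	 * sketch of the same shape (lock and the buffer are placeholders):
	 */
#if 0
	unsigned seq;

	do {
		seq = read_seqbegin(&lock);
		/* copy protected data into a private buffer */
	} while (read_seqretry(&lock, seq));
#endif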
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 max_dev = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11002098 rdev_for_each(rdev2, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 if (rdev2->desc_nr+1 > max_dev)
2100 max_dev = rdev2->desc_nr+1;
NeilBrowna778b732007-05-23 13:58:10 -07002101
NeilBrown70471da2009-08-03 10:59:57 +10002102 if (max_dev > le32_to_cpu(sb->max_dev)) {
2103 int bmask;
NeilBrowna778b732007-05-23 13:58:10 -07002104 sb->max_dev = cpu_to_le32(max_dev);
NeilBrown70471da2009-08-03 10:59:57 +10002105 rdev->sb_size = max_dev * 2 + 256;
2106 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2107 if (rdev->sb_size & bmask)
2108 rdev->sb_size = (rdev->sb_size | bmask) + 1;
NeilBrownddcf3522010-09-08 16:48:17 +10002109 } else
2110 max_dev = le32_to_cpu(sb->max_dev);
2111
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 for (i=0; i<max_dev;i++)
Lidong Zhong8df72022017-06-12 10:45:55 +08002113 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
NeilBrownf72ffdd2014-09-30 14:23:59 +10002114
Song Liua97b7892015-10-08 21:54:09 -07002115 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2116 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002118 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
Pawel Baldysiakddc08822017-08-16 17:13:45 +02002119 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2120 sb->feature_map |=
2121 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2122 else
2123 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002124 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2125 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2126 }
2127
NeilBrowndafb20f2012-03-19 12:46:39 +11002128 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 i = rdev2->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08002130 if (test_bit(Faulty, &rdev2->flags))
Song Liuc4d4c912015-08-13 14:31:54 -07002131 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08002132 else if (test_bit(In_sync, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Song Liua97b7892015-10-08 21:54:09 -07002134 else if (test_bit(Journal, &rdev2->flags))
Song Liubac624f2015-08-13 14:31:55 -07002135 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
NeilBrown93be75f2009-12-14 12:50:06 +11002136 else if (rdev2->raid_disk >= 0)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002137 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 else
Song Liuc4d4c912015-08-13 14:31:54 -07002139 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 }
2141
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 sb->sb_csum = calc_sb_1_csum(sb);
2143}
2144
Xiao Nid9c0fa52020-06-30 15:55:36 +08002145static sector_t super_1_choose_bm_space(sector_t dev_size)
2146{
2147 sector_t bm_space;
2148
2149 /* save 4k for bitmap usage by default, 64k if the device is bigger
2150 * than 8Gig, and 128k if bigger than 200Gig (sizes in 512-byte sectors)
2151 */
2152 if (dev_size < 64*2)
2153 bm_space = 0;
2154 else if (dev_size - 64*2 >= 200*1024*1024*2)
2155 bm_space = 128*2;
2156 else if (dev_size - 4*2 > 8*1024*1024*2)
2157 bm_space = 64*2;
2158 else
2159 bm_space = 4*2;
2160 return bm_space;
2161}
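/*
 * Example (illustrative size): a 100Gig component has dev_size ==
 * 100*1024*1024*2 sectors, which is over 8Gig but under 200Gig, so
 * super_1_choose_bm_space() returns 64*2 sectors, i.e. 64K of bitmap
 * headroom.
 */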
2162
Chris Webb0cd17fe2008-06-28 08:31:46 +10002163static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11002164super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002165{
2166 struct mdp_superblock_1 *sb;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002167 sector_t max_sectors;
Andre Noll58c0fed2009-03-31 14:33:13 +11002168 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002169 return 0; /* component must fit device */
NeilBrownc6563a82012-05-21 09:27:00 +10002170 if (rdev->data_offset != rdev->new_data_offset)
2171 return 0; /* too confusing */
Andre Noll0f420352008-07-11 22:02:23 +10002172 if (rdev->sb_start < rdev->data_offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002173 /* minor versions 1 and 2; superblock before data */
Christoph Hellwig0fe80342021-10-18 12:11:06 +02002174 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002175 if (!num_sectors || num_sectors > max_sectors)
2176 num_sectors = max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11002177 } else if (rdev->mddev->bitmap_info.offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002178 /* minor version 0 with bitmap we can't move */
2179 return 0;
2180 } else {
2181 /* minor version 0; superblock after data */
Xiao Nid9c0fa52020-06-30 15:55:36 +08002182 sector_t sb_start, bm_space;
Christoph Hellwig0fe80342021-10-18 12:11:06 +02002183 sector_t dev_size = bdev_nr_sectors(rdev->bdev);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002184
2185 /* 8K is for superblock */
2186 sb_start = dev_size - 8*2;
Andre Noll0f420352008-07-11 22:02:23 +10002187 sb_start &= ~(sector_t)(4*2 - 1);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002188
2189 bm_space = super_1_choose_bm_space(dev_size);
2190
2191 /* Space that can be used to store data needs to exclude the
2192 * superblock, the bitmap space and the bad block space (4K)
2193 */
2194 max_sectors = sb_start - bm_space - 4*2;
2195
Andre Noll15f4a5f2008-07-21 14:42:12 +10002196 if (!num_sectors || num_sectors > max_sectors)
2197 num_sectors = max_sectors;
Markus Hochholdinger55df1ce2021-11-16 10:21:35 +00002198 rdev->sb_start = sb_start;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002199 }
Namhyung Kim65a06f062011-07-27 11:00:36 +10002200 sb = page_address(rdev->sb_page);
Andre Noll15f4a5f2008-07-21 14:42:12 +10002201 sb->data_size = cpu_to_le64(num_sectors);
Jason Yan3fb632e2017-03-10 11:27:23 +08002202 sb->super_offset = cpu_to_le64(rdev->sb_start);
Chris Webb0cd17fe2008-06-28 08:31:46 +10002203 sb->sb_csum = calc_sb_1_csum(sb);
NeilBrown46533ff2016-11-18 16:16:11 +11002204 do {
2205 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2206 rdev->sb_page);
2207 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11002208 return num_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10002209
2210}
2211
2212static int
2213super_1_allow_new_offset(struct md_rdev *rdev,
2214 unsigned long long new_offset)
2215{
2216 /* All necessary checks on new >= old have been done */
2217 struct bitmap *bitmap;
2218 if (new_offset >= rdev->data_offset)
2219 return 1;
2220
2221 /* with 1.0 metadata, there is no metadata to tread on
2222 * so we can always move back */
2223 if (rdev->mddev->minor_version == 0)
2224 return 1;
2225
2226 /* otherwise we must be sure not to step on
2227 * any metadata, so stay:
2228 * 36K beyond start of superblock
2229 * beyond end of badblocks
2230 * beyond write-intent bitmap
2231 */
2232 if (rdev->sb_start + (32+4)*2 > new_offset)
2233 return 0;
2234 bitmap = rdev->mddev->bitmap;
2235 if (bitmap && !rdev->mddev->bitmap_info.file &&
2236 rdev->sb_start + rdev->mddev->bitmap_info.offset +
NeilBrown1ec885c2012-05-22 13:55:10 +10002237 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
NeilBrownc6563a82012-05-21 09:27:00 +10002238 return 0;
2239 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2240 return 0;
2241
2242 return 1;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002243}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244
Adrian Bunk75c96f82005-05-05 16:16:09 -07002245static struct super_type super_types[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 [0] = {
2247 .name = "0.90.0",
2248 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002249 .load_super = super_90_load,
2250 .validate_super = super_90_validate,
2251 .sync_super = super_90_sync,
2252 .rdev_size_change = super_90_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002253 .allow_new_offset = super_90_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 },
2255 [1] = {
2256 .name = "md-1",
2257 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002258 .load_super = super_1_load,
2259 .validate_super = super_1_validate,
2260 .sync_super = super_1_sync,
2261 .rdev_size_change = super_1_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002262 .allow_new_offset = super_1_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 },
2264};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265
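/*
 * Illustrative sketch (not part of the driver): super_types[] is a
 * classic ops-vector, indexed by the metadata major version the way
 * sync_super() below does. A self-contained analogue with invented
 * names:
 */
#include <stdio.h>

struct fmt_ops {
	const char *name;
	void (*sync)(void);
};

static void sync_v0(void) { printf("0.90 handler\n"); }
static void sync_v1(void) { printf("1.x handler\n"); }

static struct fmt_ops fmt_table[] = {
	[0] = { .name = "0.90.0", .sync = sync_v0 },
	[1] = { .name = "md-1",   .sync = sync_v1 },
};

int main(void)
{
	int major_version = 1;			/* made-up input */

	fmt_table[major_version].sync();	/* dispatch by version */
	return 0;
}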
NeilBrownfd01b882011-10-11 16:47:53 +11002266static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
Jonathan Brassow076f9682011-06-07 17:51:30 -05002267{
2268 if (mddev->sync_super) {
2269 mddev->sync_super(mddev, rdev);
2270 return;
2271 }
2272
2273 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2274
2275 super_types[mddev->major_version].sync_super(mddev, rdev);
2276}
2277
NeilBrownfd01b882011-10-11 16:47:53 +11002278static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279{
NeilBrown3cb03002011-10-11 16:45:26 +11002280 struct md_rdev *rdev, *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
NeilBrown4b809912008-07-21 17:05:25 +10002282 rcu_read_lock();
Song Liu0b020e82015-09-03 23:00:35 -07002283 rdev_for_each_rcu(rdev, mddev1) {
2284 if (test_bit(Faulty, &rdev->flags) ||
2285 test_bit(Journal, &rdev->flags) ||
2286 rdev->raid_disk == -1)
2287 continue;
2288 rdev_for_each_rcu(rdev2, mddev2) {
2289 if (test_bit(Faulty, &rdev2->flags) ||
2290 test_bit(Journal, &rdev2->flags) ||
2291 rdev2->raid_disk == -1)
2292 continue;
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02002293 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
NeilBrown4b809912008-07-21 17:05:25 +10002294 rcu_read_unlock();
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002295 return 1;
NeilBrown4b809912008-07-21 17:05:25 +10002296 }
Song Liu0b020e82015-09-03 23:00:35 -07002297 }
2298 }
NeilBrown4b809912008-07-21 17:05:25 +10002299 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 return 0;
2301}
2302
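/*
 * Illustrative sketch (not part of the driver): match_mddev_units()
 * above is a pairwise intersection test over the member disks of two
 * arrays. A user-space analogue over plain disk ids, with made-up
 * data:
 */
#include <assert.h>

static int share_a_disk(const int *a, int na, const int *b, int nb)
{
	int i, j;

	for (i = 0; i < na; i++)
		for (j = 0; j < nb; j++)
			if (a[i] == b[j])
				return 1;	/* same unit in both */
	return 0;
}

int main(void)
{
	int md0[] = { 8, 16 }, md1[] = { 32, 48 }, md2[] = { 16, 64 };

	assert(!share_a_disk(md0, 2, md1, 2));	/* disjoint arrays */
	assert(share_a_disk(md0, 2, md2, 2));	/* both use disk 16 */
	return 0;
}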
2303static LIST_HEAD(pending_raid_disks);
2304
Andre Nollac5e7112009-08-03 10:59:47 +10002305/*
2306 * Try to register data integrity profile for an mddev
2307 *
2308 * This is called when an array is started and after a disk has been kicked
2309 * from the array. It only succeeds if all working and active component devices
2310 * are integrity capable with matching profiles.
2311 */
NeilBrownfd01b882011-10-11 16:47:53 +11002312int md_integrity_register(struct mddev *mddev)
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002313{
NeilBrown3cb03002011-10-11 16:45:26 +11002314 struct md_rdev *rdev, *reference = NULL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002315
Andre Nollac5e7112009-08-03 10:59:47 +10002316 if (list_empty(&mddev->disks))
2317 return 0; /* nothing to do */
Jonathan Brassow629acb62011-06-08 15:10:08 +10002318 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2319 return 0; /* shouldn't register, or already is */
NeilBrowndafb20f2012-03-19 12:46:39 +11002320 rdev_for_each(rdev, mddev) {
Andre Nollac5e7112009-08-03 10:59:47 +10002321 /* skip spares and non-functional disks */
2322 if (test_bit(Faulty, &rdev->flags))
2323 continue;
2324 if (rdev->raid_disk < 0)
2325 continue;
Andre Nollac5e7112009-08-03 10:59:47 +10002326 if (!reference) {
2327 /* Use the first rdev as the reference */
2328 reference = rdev;
2329 continue;
2330 }
2331 /* does this rdev's profile match the reference profile? */
2332 if (blk_integrity_compare(reference->bdev->bd_disk,
2333 rdev->bdev->bd_disk) < 0)
2334 return -EINVAL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002335 }
Martin K. Petersen89078d52011-03-28 20:09:12 -04002336 if (!reference || !bdev_get_integrity(reference->bdev))
2337 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002338 /*
2339 * All component devices are integrity capable and have matching
2340 * profiles, register the common profile for the md device.
2341 */
Martin K. Petersen25520d52015-10-21 13:19:49 -04002342 blk_integrity_register(mddev->gendisk,
2343 bdev_get_integrity(reference->bdev));
2344
NeilBrown9d487392016-11-02 14:16:49 +11002345 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
Guoqing Jiang10764812021-05-25 17:46:17 +08002346 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
Guoqing Jiangdaee2022021-06-03 17:21:06 +08002347 (mddev->level != 1 && mddev->level != 10 &&
2348 bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) {
Guoqing Jiangde3ea662021-06-03 17:21:07 +08002349 /*
2350 * No need to handle the failure of bioset_integrity_create,
 2351		 * because this function is called by md_run() -> pers->run(),
 2352		 * and md_run() calls bioset_exit() -> bioset_integrity_free()
 2353		 * in the failure case.
2354 */
NeilBrown9d487392016-11-02 14:16:49 +11002355 pr_err("md: failed to create integrity pool for %s\n",
Martin K. Petersena91a2782011-03-17 11:11:05 +01002356 mdname(mddev));
2357 return -EINVAL;
2358 }
Andre Nollac5e7112009-08-03 10:59:47 +10002359 return 0;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002360}
Andre Nollac5e7112009-08-03 10:59:47 +10002361EXPORT_SYMBOL(md_integrity_register);
2362
Dan Williams1501efa2016-01-13 16:00:07 -08002363/*
2364 * Attempt to add an rdev, but only if it is consistent with the current
2365 * integrity profile
2366 */
2367int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
Andre Nollac5e7112009-08-03 10:59:47 +10002368{
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002369 struct blk_integrity *bi_mddev;
Dan Williams1501efa2016-01-13 16:00:07 -08002370 char name[BDEVNAME_SIZE];
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002371
2372 if (!mddev->gendisk)
Dan Williams1501efa2016-01-13 16:00:07 -08002373 return 0;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002374
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002375 bi_mddev = blk_get_integrity(mddev->gendisk);
Andre Nollac5e7112009-08-03 10:59:47 +10002376
2377 if (!bi_mddev) /* nothing to do */
Dan Williams1501efa2016-01-13 16:00:07 -08002378 return 0;
2379
2380 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11002381 pr_err("%s: incompatible integrity profile for %s\n",
2382 mdname(mddev), bdevname(rdev->bdev, name));
Dan Williams1501efa2016-01-13 16:00:07 -08002383 return -ENXIO;
2384 }
2385
2386 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002387}
2388EXPORT_SYMBOL(md_integrity_add_rdev);
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002389
Christoph Hellwigd7a47832021-02-01 14:17:20 +01002390static bool rdev_read_only(struct md_rdev *rdev)
2391{
2392 return bdev_read_only(rdev->bdev) ||
2393 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2394}
2395
NeilBrownf72ffdd2014-09-30 14:23:59 +10002396static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397{
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002398 char b[BDEVNAME_SIZE];
NeilBrown5e55e2f2007-03-26 21:32:14 -08002399 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
Dan Williams11e2ede2008-04-30 00:52:32 -07002401 /* prevent duplicates */
2402 if (find_rdev(mddev, rdev->bdev->bd_dev))
2403 return -EEXIST;
2404
Christoph Hellwigd7a47832021-02-01 14:17:20 +01002405 if (rdev_read_only(rdev) && mddev->pers)
NeilBrown97b20ef2017-04-13 08:53:48 +10002406 return -EROFS;
2407
Andre Nolldd8ac332009-03-31 14:33:13 +11002408 /* make sure rdev->sectors exceeds mddev->dev_sectors */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002409 if (!test_bit(Journal, &rdev->flags) &&
2410 rdev->sectors &&
2411 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
NeilBrowna778b732007-05-23 13:58:10 -07002412 if (mddev->pers) {
 2413			/* Cannot change size, so fail.
2414 * If mddev->level <= 0, then we don't care
2415 * about aligning sizes (e.g. linear)
2416 */
2417 if (mddev->level > 0)
2418 return -ENOSPC;
2419 } else
Andre Nolldd8ac332009-03-31 14:33:13 +11002420 mddev->dev_sectors = rdev->sectors;
NeilBrown2bf071b2006-01-06 00:20:55 -08002421 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422
2423 /* Verify rdev->desc_nr is unique.
2424 * If it is -1, assign a free number, else
2425 * check number is not in use
2426 */
NeilBrown4878e9e2014-09-25 17:00:11 +10002427 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 if (rdev->desc_nr < 0) {
2429 int choice = 0;
NeilBrown4878e9e2014-09-25 17:00:11 +10002430 if (mddev->pers)
2431 choice = mddev->raid_disks;
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002432 while (md_find_rdev_nr_rcu(mddev, choice))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 choice++;
2434 rdev->desc_nr = choice;
2435 } else {
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002436 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
NeilBrown4878e9e2014-09-25 17:00:11 +10002437 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 return -EBUSY;
NeilBrown4878e9e2014-09-25 17:00:11 +10002439 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 }
NeilBrown4878e9e2014-09-25 17:00:11 +10002441 rcu_read_unlock();
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002442 if (!test_bit(Journal, &rdev->flags) &&
2443 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11002444 pr_warn("md: %s: array is limited to %d devices\n",
2445 mdname(mddev), mddev->max_disks);
NeilBrownde01dfa2009-02-06 18:02:46 +11002446 return -EBUSY;
2447 }
NeilBrown19133a42005-11-08 21:39:35 -08002448 bdevname(rdev->bdev,b);
Rasmus Villemoes90a9bef2015-06-25 15:02:36 -07002449 strreplace(b, '/', '!');
Greg Kroah-Hartman649316b2007-12-17 23:05:35 -07002450
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 rdev->mddev = mddev;
NeilBrown9d487392016-11-02 14:16:49 +11002452 pr_debug("md: bind<%s>\n", b);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002453
Guoqing Jiang963c5552019-06-14 17:10:36 +08002454 if (mddev->raid_disks)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002455 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang963c5552019-06-14 17:10:36 +08002456
Greg Kroah-Hartmanb2d6db52007-12-17 23:05:35 -07002457 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
NeilBrown5e55e2f2007-03-26 21:32:14 -08002458 goto fail;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002459
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09002460 /* failure here is OK */
Christoph Hellwig8d652692020-11-17 08:18:55 +01002461 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
NeilBrown00bcb4a2010-06-01 19:37:23 +10002462 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07002463 rdev->sysfs_unack_badblocks =
2464 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2465 rdev->sysfs_badblocks =
2466 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
NeilBrown3c0ee632008-10-21 13:25:28 +11002467
NeilBrown4b809912008-07-21 17:05:25 +10002468 list_add_rcu(&rdev->same_set, &mddev->disks);
Tejun Heoe09b4572010-11-13 11:55:17 +01002469 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
NeilBrown4044ba52009-01-09 08:31:11 +11002470
2471 /* May as well allow recovery to be retried once */
NeilBrown53890422011-07-27 11:00:36 +10002472 mddev->recovery_disabled++;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002473
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 return 0;
NeilBrown5e55e2f2007-03-26 21:32:14 -08002475
2476 fail:
NeilBrown9d487392016-11-02 14:16:49 +11002477 pr_warn("md: failed to register dev-%s for %s\n",
2478 b, mdname(mddev));
NeilBrown5e55e2f2007-03-26 21:32:14 -08002479 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480}
2481
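/*
 * Illustrative sketch (not part of the driver): the desc_nr assignment
 * in bind_rdev_to_array() above is a linear probe for the first unused
 * number, starting at raid_disks when the array is running. A
 * self-contained analogue with invented data:
 */
#include <assert.h>

static int in_use(const int *used, int n, int id)
{
	int i;

	for (i = 0; i < n; i++)
		if (used[i] == id)
			return 1;
	return 0;
}

static int first_free_desc_nr(const int *used, int n, int start)
{
	int choice = start;

	while (in_use(used, n, choice))
		choice++;
	return choice;
}

int main(void)
{
	int used[] = { 0, 1, 2, 4 };

	assert(first_free_desc_nr(used, 4, 0) == 3);
	assert(first_free_desc_nr(used, 4, 4) == 5);
	return 0;
}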
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002482static void rdev_delayed_delete(struct work_struct *ws)
NeilBrown5792a282007-04-04 19:08:18 -07002483{
NeilBrown3cb03002011-10-11 16:45:26 +11002484 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
NeilBrown5792a282007-04-04 19:08:18 -07002485 kobject_del(&rdev->kobj);
NeilBrown177a99b2008-02-06 01:39:56 -08002486 kobject_put(&rdev->kobj);
NeilBrown5792a282007-04-04 19:08:18 -07002487}
2488
NeilBrownf72ffdd2014-09-30 14:23:59 +10002489static void unbind_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490{
2491 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002492
Tejun Heo49731ba2011-01-14 18:43:57 +01002493 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
NeilBrown4b809912008-07-21 17:05:25 +10002494 list_del_rcu(&rdev->same_set);
NeilBrown9d487392016-11-02 14:16:49 +11002495 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01002496 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 rdev->mddev = NULL;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002498 sysfs_remove_link(&rdev->kobj, "block");
NeilBrown3c0ee632008-10-21 13:25:28 +11002499 sysfs_put(rdev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07002500 sysfs_put(rdev->sysfs_unack_badblocks);
2501 sysfs_put(rdev->sysfs_badblocks);
NeilBrown3c0ee632008-10-21 13:25:28 +11002502 rdev->sysfs_state = NULL;
Junxiao Bie1a86db2020-07-14 16:10:26 -07002503 rdev->sysfs_unack_badblocks = NULL;
2504 rdev->sysfs_badblocks = NULL;
NeilBrown2230dfe2011-07-28 11:31:46 +10002505 rdev->badblocks.count = 0;
NeilBrown5792a282007-04-04 19:08:18 -07002506 /* We need to delay this, otherwise we can deadlock when
NeilBrown4b809912008-07-21 17:05:25 +10002507	 * writing 'remove' to "dev/state". We also need
2508 * to delay it due to rcu usage.
NeilBrown5792a282007-04-04 19:08:18 -07002509 */
NeilBrown4b809912008-07-21 17:05:25 +10002510 synchronize_rcu();
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002511 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
NeilBrown177a99b2008-02-06 01:39:56 -08002512 kobject_get(&rdev->kobj);
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002513 queue_work(md_rdev_misc_wq, &rdev->del_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514}
2515
2516/*
2517 * prevent the device from being mounted, repartitioned or
2518 * otherwise reused by a RAID array (or any other kernel
2519 * subsystem), by bd_claiming the device.
2520 */
NeilBrown3cb03002011-10-11 16:45:26 +11002521static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522{
2523 int err = 0;
2524 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
Tejun Heod4d77622010-11-13 11:55:18 +01002526 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
NeilBrown3cb03002011-10-11 16:45:26 +11002527 shared ? (struct md_rdev *)lock_rdev : rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 if (IS_ERR(bdev)) {
Christoph Hellwigea3edd42020-03-24 08:25:11 +01002529 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2530 MAJOR(dev), MINOR(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 return PTR_ERR(bdev);
2532 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 rdev->bdev = bdev;
2534 return err;
2535}
2536
NeilBrown3cb03002011-10-11 16:45:26 +11002537static void unlock_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538{
2539 struct block_device *bdev = rdev->bdev;
2540 rdev->bdev = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01002541 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542}
2543
2544void md_autodetect_dev(dev_t dev);
2545
NeilBrownf72ffdd2014-09-30 14:23:59 +10002546static void export_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547{
2548 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002549
NeilBrown9d487392016-11-02 14:16:49 +11002550 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
NeilBrown545c8792012-05-22 13:54:30 +10002551 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552#ifndef MODULE
NeilBrownd0fae182008-03-04 14:29:31 -08002553 if (test_bit(AutoDetected, &rdev->flags))
2554 md_autodetect_dev(rdev->bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555#endif
2556 unlock_rdev(rdev);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002557 kobject_put(&rdev->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558}
2559
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002560void md_kick_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561{
2562 unbind_rdev_from_array(rdev);
2563 export_rdev(rdev);
2564}
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002565EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566
NeilBrownfd01b882011-10-11 16:47:53 +11002567static void export_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568{
NeilBrown0638bb02014-09-25 17:43:47 +10002569 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570
NeilBrown0638bb02014-09-25 17:43:47 +10002571 while (!list_empty(&mddev->disks)) {
2572 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2573 same_set);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002574 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 mddev->raid_disks = 0;
2577 mddev->major_version = 0;
2578}
2579
NeilBrown6497709b2017-03-15 14:05:14 +11002580static bool set_in_sync(struct mddev *mddev)
2581{
Shaohua Liefa4b772017-10-18 22:08:13 -07002582 lockdep_assert_held(&mddev->lock);
NeilBrown4ad23a972017-03-15 14:05:14 +11002583 if (!mddev->in_sync) {
2584 mddev->sync_checkers++;
2585 spin_unlock(&mddev->lock);
2586 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2587 spin_lock(&mddev->lock);
2588 if (!mddev->in_sync &&
2589 percpu_ref_is_zero(&mddev->writes_pending)) {
NeilBrown6497709b2017-03-15 14:05:14 +11002590 mddev->in_sync = 1;
NeilBrown4ad23a972017-03-15 14:05:14 +11002591 /*
2592 * Ensure ->in_sync is visible before we clear
2593 * ->sync_checkers.
2594 */
NeilBrown55cc39f2017-03-15 14:05:14 +11002595 smp_mb();
NeilBrown6497709b2017-03-15 14:05:14 +11002596 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2597 sysfs_notify_dirent_safe(mddev->sysfs_state);
2598 }
NeilBrown4ad23a972017-03-15 14:05:14 +11002599 if (--mddev->sync_checkers == 0)
2600 percpu_ref_switch_to_percpu(&mddev->writes_pending);
NeilBrown6497709b2017-03-15 14:05:14 +11002601 }
2602 if (mddev->safemode == 1)
2603 mddev->safemode = 0;
2604 return mddev->in_sync;
2605}
2606
NeilBrownf72ffdd2014-09-30 14:23:59 +10002607static void sync_sbs(struct mddev *mddev, int nospares)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608{
NeilBrown42543762006-06-26 00:27:57 -07002609 /* Update each superblock (in-memory image), but
2610 * if we are allowed to, skip spares which already
2611 * have the right event counter, or have one earlier
2612 * (which would mean they aren't being marked as dirty
2613 * with the rest of the array)
2614 */
NeilBrown3cb03002011-10-11 16:45:26 +11002615 struct md_rdev *rdev;
NeilBrowndafb20f2012-03-19 12:46:39 +11002616 rdev_for_each(rdev, mddev) {
NeilBrown42543762006-06-26 00:27:57 -07002617 if (rdev->sb_events == mddev->events ||
2618 (nospares &&
2619 rdev->raid_disk < 0 &&
NeilBrown42543762006-06-26 00:27:57 -07002620 rdev->sb_events+1 == mddev->events)) {
2621 /* Don't update this superblock */
2622 rdev->sb_loaded = 2;
2623 } else {
Jonathan Brassow076f9682011-06-07 17:51:30 -05002624 sync_super(mddev, rdev);
NeilBrown42543762006-06-26 00:27:57 -07002625 rdev->sb_loaded = 1;
2626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 }
2628}
2629
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002630static bool does_sb_need_changing(struct mddev *mddev)
2631{
2632 struct md_rdev *rdev;
2633 struct mdp_superblock_1 *sb;
2634 int role;
2635
2636 /* Find a good rdev */
2637 rdev_for_each(rdev, mddev)
2638 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2639 break;
2640
2641 /* No good device found. */
2642 if (!rdev)
2643 return false;
2644
2645 sb = page_address(rdev->sb_page);
2646 /* Check if a device has become faulty or a spare become active */
2647 rdev_for_each(rdev, mddev) {
2648 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2649 /* Device activated? */
 2650		if (role == 0xffff && rdev->raid_disk >= 0 &&
2651 !test_bit(Faulty, &rdev->flags))
2652 return true;
2653 /* Device turned faulty? */
2654 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2655 return true;
2656 }
2657
2658 /* Check if any mddev parameters have changed */
2659 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2660 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
Jason Yan13459212017-03-10 11:49:12 +08002661 (mddev->layout != le32_to_cpu(sb->layout)) ||
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002662 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2663 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2664 return true;
2665
2666 return false;
2667}
2668
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002669void md_update_sb(struct mddev *mddev, int force_change)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670{
NeilBrown3cb03002011-10-11 16:45:26 +11002671 struct md_rdev *rdev;
NeilBrown06d91a52005-06-21 17:17:12 -07002672 int sync_req;
NeilBrown42543762006-06-26 00:27:57 -07002673 int nospares = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002674 int any_badblocks_changed = 0;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002675 int ret = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676
NeilBrownd87f0642013-04-24 11:42:40 +10002677 if (mddev->ro) {
2678 if (force_change)
Shaohua Li29530792016-12-08 15:48:19 -08002679 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownd87f0642013-04-24 11:42:40 +10002680 return;
2681 }
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002682
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002683repeat:
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002684 if (mddev_is_clustered(mddev)) {
Shaohua Li29530792016-12-08 15:48:19 -08002685 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002686 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002687 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04002688 nospares = 1;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002689 ret = md_cluster_ops->metadata_update_start(mddev);
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002690		/* Has someone else updated the sb? */
2691 if (!does_sb_need_changing(mddev)) {
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002692 if (ret == 0)
2693 md_cluster_ops->metadata_update_cancel(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08002694 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2695 BIT(MD_SB_CHANGE_DEVS) |
2696 BIT(MD_SB_CHANGE_CLEAN));
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002697 return;
2698 }
2699 }
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002700
NeilBrowndb0505d2017-10-17 16:18:36 +11002701 /*
2702 * First make sure individual recovery_offsets are correct
2703 * curr_resync_completed can only be used during recovery.
2704 * During reshape/resync it might use array-addresses rather
2705 * that device addresses.
2706 */
NeilBrowndafb20f2012-03-19 12:46:39 +11002707 rdev_for_each(rdev, mddev) {
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002708 if (rdev->raid_disk >= 0 &&
2709 mddev->delta_disks >= 0 &&
NeilBrowndb0505d2017-10-17 16:18:36 +11002710 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2711 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2712 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002713 !test_bit(Journal, &rdev->flags) &&
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002714 !test_bit(In_sync, &rdev->flags) &&
2715 mddev->curr_resync_completed > rdev->recovery_offset)
2716 rdev->recovery_offset = mddev->curr_resync_completed;
2717
NeilBrownf72ffdd2014-09-30 14:23:59 +10002718 }
Dan Williamsbd52b742010-08-30 17:33:33 +10002719 if (!mddev->persistent) {
Shaohua Li29530792016-12-08 15:48:19 -08002720 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2721 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownde393cd2011-07-28 11:31:48 +10002722 if (!mddev->external) {
Shaohua Li29530792016-12-08 15:48:19 -08002723 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrowndafb20f2012-03-19 12:46:39 +11002724 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002725 if (rdev->badblocks.changed) {
NeilBrownd0962932012-03-19 12:46:41 +11002726 rdev->badblocks.changed = 0;
Vishal Vermafc974ee2015-12-24 19:20:34 -07002727 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002728 md_error(mddev, rdev);
2729 }
2730 clear_bit(Blocked, &rdev->flags);
2731 clear_bit(BlockedBadBlocks, &rdev->flags);
2732 wake_up(&rdev->blocked_wait);
2733 }
2734 }
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002735 wake_up(&mddev->sb_wait);
2736 return;
2737 }
2738
NeilBrown85572d72014-12-15 12:56:56 +11002739 spin_lock(&mddev->lock);
NeilBrown84692192006-08-27 01:23:49 -07002740
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11002741 mddev->utime = ktime_get_real_seconds();
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002742
Shaohua Li29530792016-12-08 15:48:19 -08002743 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002744 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002745 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002746		/* just a clean <-> dirty transition, possibly leave spares alone,
 2747		 * though if 'events' doesn't have the right even/odd parity, we
 2748		 * will have to update the spares after all
2749 */
2750 nospares = 1;
2751 if (force_change)
2752 nospares = 0;
2753 if (mddev->degraded)
NeilBrown84692192006-08-27 01:23:49 -07002754 /* If the array is degraded, then skipping spares is both
2755 * dangerous and fairly pointless.
2756 * Dangerous because a device that was removed from the array
 2757		 * might have an event_count that still looks up-to-date,
2758 * so it can be re-added without a resync.
2759 * Pointless because if there are any spares to skip,
2760 * then a recovery will happen and soon that array won't
2761 * be degraded any more and the spare can go back to sleep then.
2762 */
NeilBrown850b2b422006-10-03 01:15:46 -07002763 nospares = 0;
NeilBrown84692192006-08-27 01:23:49 -07002764
NeilBrown06d91a52005-06-21 17:17:12 -07002765 sync_req = mddev->in_sync;
NeilBrown42543762006-06-26 00:27:57 -07002766
2767 /* If this is just a dirty<->clean transition, and the array is clean
2768 * and 'events' is odd, we can roll back to the previous clean state */
NeilBrown850b2b422006-10-03 01:15:46 -07002769 if (nospares
NeilBrown42543762006-06-26 00:27:57 -07002770 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
NeilBrowna8707c02010-05-18 09:28:43 +10002771 && mddev->can_decrease_events
2772 && mddev->events != 1) {
NeilBrown42543762006-06-26 00:27:57 -07002773 mddev->events--;
NeilBrowna8707c02010-05-18 09:28:43 +10002774 mddev->can_decrease_events = 0;
2775 } else {
NeilBrown42543762006-06-26 00:27:57 -07002776 /* otherwise we have to go forward and ... */
 2777		mddev->events++;
NeilBrowna8707c02010-05-18 09:28:43 +10002778 mddev->can_decrease_events = nospares;
NeilBrown42543762006-06-26 00:27:57 -07002779 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780
NeilBrown403df472014-09-30 15:52:29 +10002781 /*
2782 * This 64-bit counter should never wrap.
2783 * Either we are in around ~1 trillion A.C., assuming
2784 * 1 reboot per second, or we have a bug...
2785 */
2786 WARN_ON(mddev->events == 0);
NeilBrown2699b672011-07-28 11:31:47 +10002787
NeilBrowndafb20f2012-03-19 12:46:39 +11002788 rdev_for_each(rdev, mddev) {
NeilBrown2699b672011-07-28 11:31:47 +10002789 if (rdev->badblocks.changed)
2790 any_badblocks_changed++;
NeilBrownde393cd2011-07-28 11:31:48 +10002791 if (test_bit(Faulty, &rdev->flags))
2792 set_bit(FaultRecorded, &rdev->flags);
2793 }
NeilBrown2699b672011-07-28 11:31:47 +10002794
NeilBrowne6910632008-02-06 01:39:51 -08002795 sync_sbs(mddev, nospares);
NeilBrown85572d72014-12-15 12:56:56 +11002796 spin_unlock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797
NeilBrown36a4e1f2011-10-07 14:23:17 +11002798 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2799 mdname(mddev), mddev->in_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800
Shaohua Li504634f2016-11-18 09:44:08 -08002801 if (mddev->queue)
2802 blk_add_trace_msg(mddev->queue, "md md_update_sb");
NeilBrown46533ff2016-11-18 16:16:11 +11002803rewrite:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002804 md_bitmap_update_sb(mddev->bitmap);
NeilBrowndafb20f2012-03-19 12:46:39 +11002805 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806 char b[BDEVNAME_SIZE];
NeilBrown36a4e1f2011-10-07 14:23:17 +11002807
NeilBrown42543762006-06-26 00:27:57 -07002808 if (rdev->sb_loaded != 1)
2809 continue; /* no noise on spare devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810
NeilBrownf4667222013-12-09 12:04:56 +11002811 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown7bfa19f2005-06-21 17:17:28 -07002812 md_super_write(mddev,rdev,
Andre Noll0f420352008-07-11 22:02:23 +10002813 rdev->sb_start, rdev->sb_size,
NeilBrown7bfa19f2005-06-21 17:17:28 -07002814 rdev->sb_page);
NeilBrown36a4e1f2011-10-07 14:23:17 +11002815 pr_debug("md: (write) %s's sb offset: %llu\n",
2816 bdevname(rdev->bdev, b),
2817 (unsigned long long)rdev->sb_start);
NeilBrown42543762006-06-26 00:27:57 -07002818 rdev->sb_events = mddev->events;
NeilBrown2699b672011-07-28 11:31:47 +10002819 if (rdev->badblocks.size) {
2820 md_super_write(mddev, rdev,
2821 rdev->badblocks.sector,
2822 rdev->badblocks.size << 9,
2823 rdev->bb_page);
2824 rdev->badblocks.size = 0;
2825 }
NeilBrown7bfa19f2005-06-21 17:17:28 -07002826
NeilBrownf4667222013-12-09 12:04:56 +11002827 } else
NeilBrown36a4e1f2011-10-07 14:23:17 +11002828 pr_debug("md: %s (skipping faulty)\n",
2829 bdevname(rdev->bdev, b));
Andrei Warkentind70ed2e2011-10-18 12:16:48 +11002830
NeilBrown7bfa19f2005-06-21 17:17:28 -07002831 if (mddev->level == LEVEL_MULTIPATH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 /* only need to write one superblock... */
2833 break;
2834 }
NeilBrown46533ff2016-11-18 16:16:11 +11002835 if (md_super_wait(mddev) < 0)
2836 goto rewrite;
Shaohua Li29530792016-12-08 15:48:19 -08002837 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
NeilBrown7bfa19f2005-06-21 17:17:28 -07002838
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002839 if (mddev_is_clustered(mddev) && ret == 0)
2840 md_cluster_ops->metadata_update_finish(mddev);
2841
NeilBrown850b2b422006-10-03 01:15:46 -07002842 if (mddev->in_sync != sync_req ||
Shaohua Li29530792016-12-08 15:48:19 -08002843 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2844 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
NeilBrown06d91a52005-06-21 17:17:12 -07002845 /* have to write it out again */
NeilBrown06d91a52005-06-21 17:17:12 -07002846 goto repeat;
NeilBrown3d310eb2005-06-21 17:17:26 -07002847 wake_up(&mddev->sb_wait);
NeilBrownacb180b2009-04-14 16:28:34 +10002848 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
Junxiao Bie1a86db2020-07-14 16:10:26 -07002849 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown06d91a52005-06-21 17:17:12 -07002850
NeilBrowndafb20f2012-03-19 12:46:39 +11002851 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002852 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2853 clear_bit(Blocked, &rdev->flags);
2854
2855 if (any_badblocks_changed)
Vishal Vermafc974ee2015-12-24 19:20:34 -07002856 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002857 clear_bit(BlockedBadBlocks, &rdev->flags);
2858 wake_up(&rdev->blocked_wait);
2859 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860}
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002861EXPORT_SYMBOL(md_update_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002862
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002863static int add_bound_rdev(struct md_rdev *rdev)
2864{
2865 struct mddev *mddev = rdev->mddev;
2866 int err = 0;
Shaohua Li87d4d912016-01-06 14:37:14 -08002867 bool add_journal = test_bit(Journal, &rdev->flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002868
Shaohua Li87d4d912016-01-06 14:37:14 -08002869 if (!mddev->pers->hot_remove_disk || add_journal) {
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002870 /* If there is hot_add_disk but no hot_remove_disk
2871 * then added disks for geometry changes,
2872 * and should be added immediately.
2873 */
2874 super_types[mddev->major_version].
2875 validate_super(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002876 if (add_journal)
2877 mddev_suspend(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002878 err = mddev->pers->hot_add_disk(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002879 if (add_journal)
2880 mddev_resume(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002881 if (err) {
Guoqing Jiangdb767672016-06-02 23:32:05 -04002882 md_kick_rdev_from_array(rdev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002883 return err;
2884 }
2885 }
2886 sysfs_notify_dirent_safe(rdev->sysfs_state);
2887
Shaohua Li29530792016-12-08 15:48:19 -08002888 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002889 if (mddev->degraded)
2890 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2891 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Guoqing Jiang54679482021-10-04 23:34:53 +08002892 md_new_event();
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002893 md_wakeup_thread(mddev->thread);
2894 return 0;
2895}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896
Andre Noll7f6ce762008-03-23 18:34:54 +01002897/* words written to sysfs files may, or may not, be \n terminated.
NeilBrownbce74da2006-01-06 00:20:41 -08002898 * We want to accept either case. For this we use cmd_match.
2899 */
2900static int cmd_match(const char *cmd, const char *str)
2901{
2902 /* See if cmd, written into a sysfs file, matches
2903 * str. They must either be the same, or cmd can
2904 * have a trailing newline
2905 */
2906 while (*cmd && *str && *cmd == *str) {
2907 cmd++;
2908 str++;
2909 }
2910 if (*cmd == '\n')
2911 cmd++;
2912 if (*str || *cmd)
2913 return 0;
2914 return 1;
2915}
2916
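/*
 * Illustrative sketch (not part of the driver): what cmd_match() above
 * accepts and rejects. The function body is copied into a local helper
 * so the sketch compiles stand-alone:
 */
#include <assert.h>

static int cmd_match_demo(const char *cmd, const char *str)
{
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}

int main(void)
{
	assert(cmd_match_demo("faulty", "faulty"));	/* exact match */
	assert(cmd_match_demo("faulty\n", "faulty"));	/* trailing \n ok */
	assert(!cmd_match_demo("fault", "faulty"));	/* prefix rejected */
	assert(!cmd_match_demo("faultyX", "faulty"));	/* trailing junk */
	return 0;
}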
NeilBrown86e6ffd2005-11-08 21:39:24 -08002917struct rdev_sysfs_entry {
2918 struct attribute attr;
NeilBrown3cb03002011-10-11 16:45:26 +11002919 ssize_t (*show)(struct md_rdev *, char *);
2920 ssize_t (*store)(struct md_rdev *, const char *, size_t);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002921};
2922
2923static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002924state_show(struct md_rdev *rdev, char *page)
NeilBrown86e6ffd2005-11-08 21:39:24 -08002925{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002926 char *sep = ",";
NeilBrown20a49ff2008-02-06 01:39:57 -08002927 size_t len = 0;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002928 unsigned long flags = READ_ONCE(rdev->flags);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002929
NeilBrown758bfc82014-12-15 12:56:59 +11002930 if (test_bit(Faulty, &flags) ||
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02002931 (!test_bit(ExternalBbl, &flags) &&
2932 rdev->badblocks.unacked_exist))
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002933 len += sprintf(page+len, "faulty%s", sep);
2934 if (test_bit(In_sync, &flags))
2935 len += sprintf(page+len, "in_sync%s", sep);
2936 if (test_bit(Journal, &flags))
2937 len += sprintf(page+len, "journal%s", sep);
2938 if (test_bit(WriteMostly, &flags))
2939 len += sprintf(page+len, "write_mostly%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002940 if (test_bit(Blocked, &flags) ||
NeilBrown52c64152011-12-08 16:22:48 +11002941 (rdev->badblocks.unacked_exist
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002942 && !test_bit(Faulty, &flags)))
2943 len += sprintf(page+len, "blocked%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002944 if (!test_bit(Faulty, &flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002945 !test_bit(Journal, &flags) &&
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002946 !test_bit(In_sync, &flags))
2947 len += sprintf(page+len, "spare%s", sep);
2948 if (test_bit(WriteErrorSeen, &flags))
2949 len += sprintf(page+len, "write_error%s", sep);
2950 if (test_bit(WantReplacement, &flags))
2951 len += sprintf(page+len, "want_replacement%s", sep);
2952 if (test_bit(Replacement, &flags))
2953 len += sprintf(page+len, "replacement%s", sep);
2954 if (test_bit(ExternalBbl, &flags))
2955 len += sprintf(page+len, "external_bbl%s", sep);
NeilBrown688834e2016-11-18 16:16:11 +11002956 if (test_bit(FailFast, &flags))
2957 len += sprintf(page+len, "failfast%s", sep);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002958
2959 if (len)
2960 len -= strlen(sep);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002961
NeilBrown86e6ffd2005-11-08 21:39:24 -08002962 return len+sprintf(page+len, "\n");
2963}
2964
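/*
 * Illustrative sketch (not part of the driver): state_show() above
 * joins flag names with "," by appending a separator after every item
 * and trimming the final one. A self-contained analogue with invented
 * flag names:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *sep = ",";
	const char *flags[] = { "in_sync", "write_mostly" };	/* made up */
	char page[64];
	size_t len = 0;
	unsigned int i;

	for (i = 0; i < 2; i++)
		len += sprintf(page + len, "%s%s", flags[i], sep);
	if (len)
		len -= strlen(sep);		/* drop trailing "," */
	sprintf(page + len, "\n");
	fputs(page, stdout);			/* "in_sync,write_mostly" */
	return 0;
}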
NeilBrown45dc2de2006-06-26 00:27:58 -07002965static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002966state_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown45dc2de2006-06-26 00:27:58 -07002967{
2968 /* can write
NeilBrownde393cd2011-07-28 11:31:48 +10002969 * faulty - simulates an error
NeilBrown45dc2de2006-06-26 00:27:58 -07002970 * remove - disconnects the device
NeilBrownf6556752006-06-26 00:28:01 -07002971 * writemostly - sets write_mostly
2972 * -writemostly - clears write_mostly
NeilBrownde393cd2011-07-28 11:31:48 +10002973	 *  blocked - sets the Blocked flag
 2974	 *  -blocked - clears the Blocked flag and possibly simulates an error
NeilBrown6d56e272009-04-14 12:01:57 +10002975 * insync - sets Insync providing device isn't active
NeilBrownf4667222013-12-09 12:04:56 +11002976 * -insync - clear Insync for a device with a slot assigned,
2977 * so that it gets rebuilt based on bitmap
NeilBrownd7a9d442011-07-28 11:31:48 +10002978 * write_error - sets WriteErrorSeen
2979 * -write_error - clears WriteErrorSeen
NeilBrown688834e2016-11-18 16:16:11 +11002980 * {,-}failfast - set/clear FailFast
NeilBrown45dc2de2006-06-26 00:27:58 -07002981 */
Xiao Ni8b9e2292021-10-13 22:59:33 +08002982
2983 struct mddev *mddev = rdev->mddev;
NeilBrown45dc2de2006-06-26 00:27:58 -07002984 int err = -EINVAL;
Xiao Ni8b9e2292021-10-13 22:59:33 +08002985 bool need_update_sb = false;
2986
NeilBrown45dc2de2006-06-26 00:27:58 -07002987 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2988 md_error(rdev->mddev, rdev);
NeilBrown5ef56c82011-08-25 14:42:51 +10002989 if (test_bit(Faulty, &rdev->flags))
2990 err = 0;
2991 else
2992 err = -EBUSY;
NeilBrown45dc2de2006-06-26 00:27:58 -07002993 } else if (cmd_match(buf, "remove")) {
Shaohua Li5d881782016-07-28 09:06:34 -07002994 if (rdev->mddev->pers) {
2995 clear_bit(Blocked, &rdev->flags);
2996 remove_and_add_spares(rdev->mddev, rdev);
2997 }
NeilBrown45dc2de2006-06-26 00:27:58 -07002998 if (rdev->raid_disk >= 0)
2999 err = -EBUSY;
3000 else {
NeilBrown45dc2de2006-06-26 00:27:58 -07003001 err = 0;
Guoqing Jianga9720902015-10-12 17:21:27 +08003002 if (mddev_is_clustered(mddev))
3003 err = md_cluster_ops->remove_disk(mddev, rdev);
3004
3005 if (err == 0) {
3006 md_kick_rdev_from_array(rdev);
NeilBrown060b0682016-11-04 16:46:03 +11003007 if (mddev->pers) {
Shaohua Li29530792016-12-08 15:48:19 -08003008 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11003009 md_wakeup_thread(mddev->thread);
3010 }
Guoqing Jiang54679482021-10-04 23:34:53 +08003011 md_new_event();
Guoqing Jianga9720902015-10-12 17:21:27 +08003012 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003013 }
NeilBrownf6556752006-06-26 00:28:01 -07003014 } else if (cmd_match(buf, "writemostly")) {
3015 set_bit(WriteMostly, &rdev->flags);
Guoqing Jiang404659c2019-12-23 10:48:53 +01003016 mddev_create_serial_pool(rdev->mddev, rdev, false);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003017 need_update_sb = true;
NeilBrownf6556752006-06-26 00:28:01 -07003018 err = 0;
3019 } else if (cmd_match(buf, "-writemostly")) {
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01003020 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003021 clear_bit(WriteMostly, &rdev->flags);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003022 need_update_sb = true;
NeilBrownf6556752006-06-26 00:28:01 -07003023 err = 0;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003024 } else if (cmd_match(buf, "blocked")) {
3025 set_bit(Blocked, &rdev->flags);
3026 err = 0;
3027 } else if (cmd_match(buf, "-blocked")) {
NeilBrownde393cd2011-07-28 11:31:48 +10003028 if (!test_bit(Faulty, &rdev->flags) &&
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02003029 !test_bit(ExternalBbl, &rdev->flags) &&
NeilBrown7da64a02011-08-30 16:20:17 +10003030 rdev->badblocks.unacked_exist) {
NeilBrownde393cd2011-07-28 11:31:48 +10003031 /* metadata handler doesn't understand badblocks,
3032 * so we need to fail the device
3033 */
3034 md_error(rdev->mddev, rdev);
3035 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003036 clear_bit(Blocked, &rdev->flags);
NeilBrownde393cd2011-07-28 11:31:48 +10003037 clear_bit(BlockedBadBlocks, &rdev->flags);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003038 wake_up(&rdev->blocked_wait);
3039 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3040 md_wakeup_thread(rdev->mddev->thread);
3041
3042 err = 0;
NeilBrown6d56e272009-04-14 12:01:57 +10003043 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3044 set_bit(In_sync, &rdev->flags);
3045 err = 0;
NeilBrown688834e2016-11-18 16:16:11 +11003046 } else if (cmd_match(buf, "failfast")) {
3047 set_bit(FailFast, &rdev->flags);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003048 need_update_sb = true;
NeilBrown688834e2016-11-18 16:16:11 +11003049 err = 0;
3050 } else if (cmd_match(buf, "-failfast")) {
3051 clear_bit(FailFast, &rdev->flags);
Xiao Ni8b9e2292021-10-13 22:59:33 +08003052 need_update_sb = true;
NeilBrown688834e2016-11-18 16:16:11 +11003053 err = 0;
Shaohua Lif2076e72015-10-08 21:54:12 -07003054 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3055 !test_bit(Journal, &rdev->flags)) {
NeilBrowne1960f82014-09-30 15:24:25 +10003056 if (rdev->mddev->pers == NULL) {
3057 clear_bit(In_sync, &rdev->flags);
3058 rdev->saved_raid_disk = rdev->raid_disk;
3059 rdev->raid_disk = -1;
3060 err = 0;
3061 }
NeilBrownd7a9d442011-07-28 11:31:48 +10003062 } else if (cmd_match(buf, "write_error")) {
3063 set_bit(WriteErrorSeen, &rdev->flags);
3064 err = 0;
3065 } else if (cmd_match(buf, "-write_error")) {
3066 clear_bit(WriteErrorSeen, &rdev->flags);
3067 err = 0;
NeilBrown2d78f8c2011-12-23 10:17:51 +11003068 } else if (cmd_match(buf, "want_replacement")) {
3069 /* Any non-spare device that is not a replacement can
3070 * become want_replacement at any time, but we then need to
3071 * check if recovery is needed.
3072 */
3073 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07003074 !test_bit(Journal, &rdev->flags) &&
NeilBrown2d78f8c2011-12-23 10:17:51 +11003075 !test_bit(Replacement, &rdev->flags))
3076 set_bit(WantReplacement, &rdev->flags);
3077 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3078 md_wakeup_thread(rdev->mddev->thread);
3079 err = 0;
3080 } else if (cmd_match(buf, "-want_replacement")) {
3081 /* Clearing 'want_replacement' is always allowed.
3082 * Once replacements starts it is too late though.
3083 */
3084 err = 0;
3085 clear_bit(WantReplacement, &rdev->flags);
3086 } else if (cmd_match(buf, "replacement")) {
3087 /* Can only set a device as a replacement when array has not
3088 * yet been started. Once running, replacement is automatic
3089 * from spares, or by assigning 'slot'.
3090 */
3091 if (rdev->mddev->pers)
3092 err = -EBUSY;
3093 else {
3094 set_bit(Replacement, &rdev->flags);
3095 err = 0;
3096 }
3097 } else if (cmd_match(buf, "-replacement")) {
3098 /* Similarly, can only clear Replacement before start */
3099 if (rdev->mddev->pers)
3100 err = -EBUSY;
3101 else {
3102 clear_bit(Replacement, &rdev->flags);
3103 err = 0;
3104 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003105 } else if (cmd_match(buf, "re-add")) {
Yufen Yuee37e622019-04-02 14:22:14 +08003106 if (!rdev->mddev->pers)
3107 err = -EINVAL;
3108 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3109 rdev->saved_raid_disk >= 0) {
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05003110 /* clear_bit is performed _after_ all the devices
3111 * have their local Faulty bit cleared. If any writes
3112 * happen in the meantime in the local node, they
3113 * will land in the local bitmap, which will be synced
3114 * by this node eventually
3115 */
3116 if (!mddev_is_clustered(rdev->mddev) ||
3117 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3118 clear_bit(Faulty, &rdev->flags);
3119 err = add_bound_rdev(rdev);
3120 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003121 } else
3122 err = -EBUSY;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02003123 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3124 set_bit(ExternalBbl, &rdev->flags);
3125 rdev->badblocks.shift = 0;
3126 err = 0;
3127 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3128 clear_bit(ExternalBbl, &rdev->flags);
3129 err = 0;
NeilBrown45dc2de2006-06-26 00:27:58 -07003130 }
Xiao Ni8b9e2292021-10-13 22:59:33 +08003131 if (need_update_sb)
3132 md_update_sb(mddev, 1);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003133 if (!err)
3134 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrown45dc2de2006-06-26 00:27:58 -07003135 return err ? err : len;
3136}
NeilBrown80ca3a42006-07-10 04:44:18 -07003137static struct rdev_sysfs_entry rdev_state =
NeilBrown750f1992014-09-30 08:53:05 +10003138__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003139
3140static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003141errors_show(struct md_rdev *rdev, char *page)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003142{
3143 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3144}
3145
3146static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003147errors_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003148{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003149 unsigned int n;
3150 int rv;
3151
3152 rv = kstrtouint(buf, 10, &n);
3153 if (rv < 0)
3154 return rv;
3155 atomic_set(&rdev->corrected_errors, n);
3156 return len;
NeilBrown4dbcdc72006-01-06 00:20:52 -08003157}
3158static struct rdev_sysfs_entry rdev_errors =
NeilBrown80ca3a42006-07-10 04:44:18 -07003159__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
NeilBrown4dbcdc72006-01-06 00:20:52 -08003160
NeilBrown014236d2006-01-06 00:20:55 -08003161static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003162slot_show(struct md_rdev *rdev, char *page)
NeilBrown014236d2006-01-06 00:20:55 -08003163{
Shaohua Lif2076e72015-10-08 21:54:12 -07003164 if (test_bit(Journal, &rdev->flags))
3165 return sprintf(page, "journal\n");
3166 else if (rdev->raid_disk < 0)
NeilBrown014236d2006-01-06 00:20:55 -08003167 return sprintf(page, "none\n");
3168 else
3169 return sprintf(page, "%d\n", rdev->raid_disk);
3170}
3171
3172static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003173slot_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown014236d2006-01-06 00:20:55 -08003174{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003175 int slot;
NeilBrownc303da62008-02-06 01:39:51 -08003176 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003177
Shaohua Lif2076e72015-10-08 21:54:12 -07003178 if (test_bit(Journal, &rdev->flags))
3179 return -EBUSY;
NeilBrown014236d2006-01-06 00:20:55 -08003180 if (strncmp(buf, "none", 4)==0)
3181 slot = -1;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003182 else {
3183 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3184 if (err < 0)
3185 return err;
3186 }
Neil Brown6c2fce22008-06-28 08:31:31 +10003187 if (rdev->mddev->pers && slot == -1) {
NeilBrownc303da62008-02-06 01:39:51 -08003188 /* Setting 'slot' on an active array requires also
3189 * updating the 'rd%d' link, and communicating
3190 * with the personality with ->hot_*_disk.
3191 * For now we only support removing
3192 * failed/spare devices. This normally happens automatically,
3193 * but not when the metadata is externally managed.
3194 */
NeilBrownc303da62008-02-06 01:39:51 -08003195 if (rdev->raid_disk == -1)
3196 return -EEXIST;
3197 /* personality does all needed checks */
Namhyung Kim01393f32011-06-09 11:42:54 +10003198 if (rdev->mddev->pers->hot_remove_disk == NULL)
NeilBrownc303da62008-02-06 01:39:51 -08003199 return -EINVAL;
NeilBrown746d3202013-04-24 11:42:41 +10003200 clear_bit(Blocked, &rdev->flags);
3201 remove_and_add_spares(rdev->mddev, rdev);
3202 if (rdev->raid_disk >= 0)
3203 return -EBUSY;
NeilBrownc303da62008-02-06 01:39:51 -08003204 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3205 md_wakeup_thread(rdev->mddev->thread);
Neil Brown6c2fce22008-06-28 08:31:31 +10003206 } else if (rdev->mddev->pers) {
Neil Brown6c2fce22008-06-28 08:31:31 +10003207 /* Activating a spare .. or possibly reactivating
NeilBrown6d56e272009-04-14 12:01:57 +10003208 * if we ever get bitmaps working here.
Neil Brown6c2fce22008-06-28 08:31:31 +10003209 */
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003210 int err;
Neil Brown6c2fce22008-06-28 08:31:31 +10003211
3212 if (rdev->raid_disk != -1)
3213 return -EBUSY;
3214
NeilBrownc6751b22011-02-02 11:57:13 +11003215 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3216 return -EBUSY;
3217
Neil Brown6c2fce22008-06-28 08:31:31 +10003218 if (rdev->mddev->pers->hot_add_disk == NULL)
3219 return -EINVAL;
3220
NeilBrownba1b41b2011-01-14 09:14:34 +11003221 if (slot >= rdev->mddev->raid_disks &&
3222 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3223 return -ENOSPC;
3224
Neil Brown6c2fce22008-06-28 08:31:31 +10003225 rdev->raid_disk = slot;
3226 if (test_bit(In_sync, &rdev->flags))
3227 rdev->saved_raid_disk = slot;
3228 else
3229 rdev->saved_raid_disk = -1;
NeilBrownd30519f2011-10-18 12:13:47 +11003230 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11003231 clear_bit(Bitmap_sync, &rdev->flags);
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02003232 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003233 if (err) {
3234 rdev->raid_disk = -1;
3235 return err;
3236 } else
3237 sysfs_notify_dirent_safe(rdev->sysfs_state);
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09003238		/* failure here is OK */
3239 sysfs_link_rdev(rdev->mddev, rdev);
Neil Brown6c2fce22008-06-28 08:31:31 +10003240 /* don't wakeup anyone, leave that to userspace. */
NeilBrownc303da62008-02-06 01:39:51 -08003241 } else {
NeilBrownba1b41b2011-01-14 09:14:34 +11003242 if (slot >= rdev->mddev->raid_disks &&
3243 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
NeilBrownc303da62008-02-06 01:39:51 -08003244 return -ENOSPC;
3245 rdev->raid_disk = slot;
3246 /* assume it is working */
NeilBrownc5d79ad2008-02-06 01:39:54 -08003247 clear_bit(Faulty, &rdev->flags);
3248 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc303da62008-02-06 01:39:51 -08003249 set_bit(In_sync, &rdev->flags);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003250 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrownc303da62008-02-06 01:39:51 -08003251 }
NeilBrown014236d2006-01-06 00:20:55 -08003252 return len;
3253}
3254
NeilBrown014236d2006-01-06 00:20:55 -08003255static struct rdev_sysfs_entry rdev_slot =
NeilBrown80ca3a42006-07-10 04:44:18 -07003256__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
NeilBrown014236d2006-01-06 00:20:55 -08003257
NeilBrown93c8cad2006-01-06 00:20:56 -08003258static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003259offset_show(struct md_rdev *rdev, char *page)
NeilBrown93c8cad2006-01-06 00:20:56 -08003260{
Andrew Morton6961ece2006-01-06 00:20:59 -08003261 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
NeilBrown93c8cad2006-01-06 00:20:56 -08003262}
3263
3264static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003265offset_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown93c8cad2006-01-06 00:20:56 -08003266{
NeilBrownc6563a82012-05-21 09:27:00 +10003267 unsigned long long offset;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003268 if (kstrtoull(buf, 10, &offset) < 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003269 return -EINVAL;
Neil Brown8ed0a522008-06-28 08:31:29 +10003270 if (rdev->mddev->pers && rdev->raid_disk >= 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003271 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003272 if (rdev->sectors && rdev->mddev->external)
NeilBrownc5d79ad2008-02-06 01:39:54 -08003273 /* Must set offset before size, so overlap checks
3274 * can be sane */
3275 return -EBUSY;
NeilBrown93c8cad2006-01-06 00:20:56 -08003276 rdev->data_offset = offset;
NeilBrown25f7fd42012-07-19 15:59:18 +10003277 rdev->new_data_offset = offset;
NeilBrown93c8cad2006-01-06 00:20:56 -08003278 return len;
3279}
3280
3281static struct rdev_sysfs_entry rdev_offset =
NeilBrown80ca3a42006-07-10 04:44:18 -07003282__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
NeilBrown93c8cad2006-01-06 00:20:56 -08003283
NeilBrownc6563a82012-05-21 09:27:00 +10003284static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3285{
3286 return sprintf(page, "%llu\n",
3287 (unsigned long long)rdev->new_data_offset);
3288}
3289
3290static ssize_t new_offset_store(struct md_rdev *rdev,
3291 const char *buf, size_t len)
3292{
3293 unsigned long long new_offset;
3294 struct mddev *mddev = rdev->mddev;
3295
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003296 if (kstrtoull(buf, 10, &new_offset) < 0)
NeilBrownc6563a82012-05-21 09:27:00 +10003297 return -EINVAL;
3298
NeilBrownf851b602014-12-11 10:02:10 +11003299 if (mddev->sync_thread ||
3300 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
NeilBrownc6563a82012-05-21 09:27:00 +10003301 return -EBUSY;
3302 if (new_offset == rdev->data_offset)
3303 /* reset is always permitted */
3304 ;
3305 else if (new_offset > rdev->data_offset) {
3306 /* must not push array size beyond rdev_sectors */
3307 if (new_offset - rdev->data_offset
3308 + mddev->dev_sectors > rdev->sectors)
3309 return -E2BIG;
3310 }
3311 /* Metadata worries about other space details. */
3312
3313 /* decreasing the offset is inconsistent with a backwards
3314 * reshape.
3315 */
3316 if (new_offset < rdev->data_offset &&
3317 mddev->reshape_backwards)
3318 return -EINVAL;
3319 /* Increasing offset is inconsistent with forwards
3320 * reshape. reshape_direction should be set to
3321 * 'backwards' first.
3322 */
3323 if (new_offset > rdev->data_offset &&
3324 !mddev->reshape_backwards)
3325 return -EINVAL;
3326
3327 if (mddev->pers && mddev->persistent &&
3328 !super_types[mddev->major_version]
3329 .allow_new_offset(rdev, new_offset))
3330 return -E2BIG;
3331 rdev->new_data_offset = new_offset;
3332 if (new_offset > rdev->data_offset)
3333 mddev->reshape_backwards = 1;
3334 else if (new_offset < rdev->data_offset)
3335 mddev->reshape_backwards = 0;
3336
3337 return len;
3338}
3339static struct rdev_sysfs_entry rdev_new_offset =
3340__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3341
NeilBrown83303b62006-01-06 00:21:06 -08003342static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003343rdev_size_show(struct md_rdev *rdev, char *page)
NeilBrown83303b62006-01-06 00:21:06 -08003344{
Andre Nolldd8ac332009-03-31 14:33:13 +11003345 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
NeilBrown83303b62006-01-06 00:21:06 -08003346}
3347
NeilBrownc5d79ad2008-02-06 01:39:54 -08003348static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3349{
3350 /* check if two start/length pairs overlap */
3351 if (s1+l1 <= s2)
3352 return 0;
3353 if (s2+l2 <= s1)
3354 return 0;
3355 return 1;
3356}
3357
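/*
 * Illustrative sketch (not part of the driver): the interval test in
 * overlaps() above, exercised with made-up start/length pairs:
 */
#include <assert.h>

static int overlaps_demo(unsigned long long s1, unsigned long long l1,
			 unsigned long long s2, unsigned long long l2)
{
	if (s1 + l1 <= s2)
		return 0;
	if (s2 + l2 <= s1)
		return 0;
	return 1;
}

int main(void)
{
	assert(!overlaps_demo(0, 100, 100, 50));  /* touching: no overlap */
	assert(overlaps_demo(0, 100, 99, 50));    /* one-sector overlap */
	assert(overlaps_demo(50, 10, 0, 100));    /* fully contained */
	return 0;
}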
Dan Williamsb522adc2009-03-31 15:00:31 +11003358static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3359{
3360 unsigned long long blocks;
3361 sector_t new;
3362
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003363 if (kstrtoull(buf, 10, &blocks) < 0)
Dan Williamsb522adc2009-03-31 15:00:31 +11003364 return -EINVAL;
3365
3366 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3367 return -EINVAL; /* sector conversion overflow */
3368
3369 new = blocks * 2;
3370 if (new != blocks * 2)
3371 return -EINVAL; /* unsigned long long to sector_t overflow */
3372
3373 *sectors = new;
3374 return 0;
3375}
3376
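/*
 * Illustrative sketch (not part of the driver): the two overflow checks
 * in strict_blocks_to_sectors() above. The first catches 64-bit wrap of
 * blocks * 2; the second catches truncation when sector_t is narrower
 * than unsigned long long. We pretend sector_t is 32-bit here so the
 * second check actually fires:
 */
#include <assert.h>
#include <stdint.h>

typedef uint32_t demo_sector_t;		/* stand-in for 32-bit sector_t */

static int blocks_to_sectors(unsigned long long blocks, demo_sector_t *out)
{
	demo_sector_t new;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -1;		/* blocks * 2 would wrap 64 bits */

	new = blocks * 2;
	if (new != blocks * 2)
		return -1;		/* doesn't fit in sector_t */

	*out = new;
	return 0;
}

int main(void)
{
	demo_sector_t s;

	assert(blocks_to_sectors(1024, &s) == 0 && s == 2048);
	assert(blocks_to_sectors(1ULL << 63, &s) == -1);  /* bit 63 set */
	assert(blocks_to_sectors(1ULL << 40, &s) == -1);  /* > 32 bits */
	return 0;
}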
NeilBrown83303b62006-01-06 00:21:06 -08003377static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003378rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown83303b62006-01-06 00:21:06 -08003379{
NeilBrownfd01b882011-10-11 16:47:53 +11003380 struct mddev *my_mddev = rdev->mddev;
Andre Nolldd8ac332009-03-31 14:33:13 +11003381 sector_t oldsectors = rdev->sectors;
Dan Williamsb522adc2009-03-31 15:00:31 +11003382 sector_t sectors;
NeilBrown27c529b2008-03-04 14:29:33 -08003383
Shaohua Lif2076e72015-10-08 21:54:12 -07003384 if (test_bit(Journal, &rdev->flags))
3385 return -EBUSY;
Dan Williamsb522adc2009-03-31 15:00:31 +11003386 if (strict_blocks_to_sectors(buf, &sectors) < 0)
Neil Brownd7027452008-07-12 10:37:50 +10003387 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10003388 if (rdev->data_offset != rdev->new_data_offset)
3389 return -EINVAL; /* too confusing */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003390 if (my_mddev->pers && rdev->raid_disk >= 0) {
Neil Brownd7027452008-07-12 10:37:50 +10003391 if (my_mddev->persistent) {
Andre Nolldd8ac332009-03-31 14:33:13 +11003392 sectors = super_types[my_mddev->major_version].
3393 rdev_size_change(rdev, sectors);
3394 if (!sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10003395 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003396 } else if (!sectors)
Christoph Hellwig0fe80342021-10-18 12:11:06 +02003397 sectors = bdev_nr_sectors(rdev->bdev) -
Andre Nolldd8ac332009-03-31 14:33:13 +11003398 rdev->data_offset;
NeilBrowna6468532013-02-21 14:33:17 +11003399 if (!my_mddev->pers->resize)
3400 /* Cannot change size for RAID0 or Linear etc */
3401 return -EINVAL;
Chris Webb0cd17fe2008-06-28 08:31:46 +10003402 }
Andre Nolldd8ac332009-03-31 14:33:13 +11003403 if (sectors < my_mddev->dev_sectors)
Chris Webb7d3c6f82008-10-13 11:55:11 +11003404 return -EINVAL; /* component must fit device */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003405
Andre Nolldd8ac332009-03-31 14:33:13 +11003406 rdev->sectors = sectors;
3407 if (sectors > oldsectors && my_mddev->external) {
NeilBrown8b1afc32014-09-29 15:33:20 +10003408 /* Need to check that all other rdevs with the same
3409 * ->bdev do not overlap. 'rcu' is sufficient to walk
3410 * the rdev lists safely.
 3411 * This check does not provide a hard guarantee; it
3412 * just helps avoid dangerous mistakes.
NeilBrownc5d79ad2008-02-06 01:39:54 -08003413 */
NeilBrownfd01b882011-10-11 16:47:53 +11003414 struct mddev *mddev;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003415 int overlap = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11003416 struct list_head *tmp;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003417
NeilBrown8b1afc32014-09-29 15:33:20 +10003418 rcu_read_lock();
NeilBrown29ac4aa2008-02-06 01:39:58 -08003419 for_each_mddev(mddev, tmp) {
NeilBrown3cb03002011-10-11 16:45:26 +11003420 struct md_rdev *rdev2;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003421
NeilBrowndafb20f2012-03-19 12:46:39 +11003422 rdev_for_each(rdev2, mddev)
NeilBrownf21e9ff2011-01-31 12:10:09 +11003423 if (rdev->bdev == rdev2->bdev &&
3424 rdev != rdev2 &&
3425 overlaps(rdev->data_offset, rdev->sectors,
3426 rdev2->data_offset,
3427 rdev2->sectors)) {
NeilBrownc5d79ad2008-02-06 01:39:54 -08003428 overlap = 1;
3429 break;
3430 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08003431 if (overlap) {
3432 mddev_put(mddev);
3433 break;
3434 }
3435 }
NeilBrown8b1afc32014-09-29 15:33:20 +10003436 rcu_read_unlock();
NeilBrownc5d79ad2008-02-06 01:39:54 -08003437 if (overlap) {
3438 /* Someone else could have slipped in a size
3439 * change here, but doing so is just silly.
Andre Nolldd8ac332009-03-31 14:33:13 +11003440 * We put oldsectors back because we *know* it is
NeilBrownc5d79ad2008-02-06 01:39:54 -08003441 * safe, and trust userspace not to race with
3442 * itself
3443 */
Andre Nolldd8ac332009-03-31 14:33:13 +11003444 rdev->sectors = oldsectors;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003445 return -EBUSY;
3446 }
3447 }
NeilBrown83303b62006-01-06 00:21:06 -08003448 return len;
3449}
3450
3451static struct rdev_sysfs_entry rdev_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07003452__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
NeilBrown83303b62006-01-06 00:21:06 -08003453
NeilBrown3cb03002011-10-11 16:45:26 +11003454static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
Dan Williams06e3c812009-12-12 21:17:12 -07003455{
3456 unsigned long long recovery_start = rdev->recovery_offset;
3457
3458 if (test_bit(In_sync, &rdev->flags) ||
3459 recovery_start == MaxSector)
3460 return sprintf(page, "none\n");
3461
3462 return sprintf(page, "%llu\n", recovery_start);
3463}
3464
NeilBrown3cb03002011-10-11 16:45:26 +11003465static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
Dan Williams06e3c812009-12-12 21:17:12 -07003466{
3467 unsigned long long recovery_start;
3468
3469 if (cmd_match(buf, "none"))
3470 recovery_start = MaxSector;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003471 else if (kstrtoull(buf, 10, &recovery_start))
Dan Williams06e3c812009-12-12 21:17:12 -07003472 return -EINVAL;
3473
3474 if (rdev->mddev->pers &&
3475 rdev->raid_disk >= 0)
3476 return -EBUSY;
3477
3478 rdev->recovery_offset = recovery_start;
3479 if (recovery_start == MaxSector)
3480 set_bit(In_sync, &rdev->flags);
3481 else
3482 clear_bit(In_sync, &rdev->flags);
3483 return len;
3484}
3485
3486static struct rdev_sysfs_entry rdev_recovery_start =
3487__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
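/*
 * Example (illustrative usage): "echo none > recovery_start" marks the
 * device fully recovered (In_sync), while "echo 4096 > recovery_start"
 * records that recovery has reached sector 4096.  Both writes are
 * rejected with -EBUSY while the device is active in a running array.
 */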
3488
Vishal Vermafc974ee2015-12-24 19:20:34 -07003489/* sysfs access to bad-blocks list.
3490 * We present two files.
3491 * 'bad-blocks' lists sector numbers and lengths of ranges that
3492 * are recorded as bad. The list is truncated to fit within
3493 * the one-page limit of sysfs.
3494 * Writing "sector length" to this file adds an acknowledged
 3495 * bad block to the list.
3496 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3497 * been acknowledged. Writing to this file adds bad blocks
3498 * without acknowledging them. This is largely for testing.
3499 */
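/*
 * Example (hypothetical paths, the exact layout depends on the array):
 * writing "2048 8" to /sys/block/md0/md/dev-sda1/bad_blocks records
 * sectors 2048-2055 as acknowledged bad blocks, and reading
 * /sys/block/md0/md/dev-sda1/unacknowledged_bad_blocks lists any ranges
 * that still await acknowledgement.
 */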
NeilBrown3cb03002011-10-11 16:45:26 +11003500static ssize_t bb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003501{
3502 return badblocks_show(&rdev->badblocks, page, 0);
3503}
NeilBrown3cb03002011-10-11 16:45:26 +11003504static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003505{
NeilBrownde393cd2011-07-28 11:31:48 +10003506 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3507 /* Maybe that ack was all we needed */
3508 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3509 wake_up(&rdev->blocked_wait);
3510 return rv;
NeilBrown16c791a2011-07-28 11:31:47 +10003511}
3512static struct rdev_sysfs_entry rdev_bad_blocks =
3513__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3514
NeilBrown3cb03002011-10-11 16:45:26 +11003515static ssize_t ubb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003516{
3517 return badblocks_show(&rdev->badblocks, page, 1);
3518}
NeilBrown3cb03002011-10-11 16:45:26 +11003519static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003520{
3521 return badblocks_store(&rdev->badblocks, page, len, 1);
3522}
3523static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3524__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3525
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003526static ssize_t
3527ppl_sector_show(struct md_rdev *rdev, char *page)
3528{
3529 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3530}
3531
3532static ssize_t
3533ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3534{
3535 unsigned long long sector;
3536
3537 if (kstrtoull(buf, 10, &sector) < 0)
3538 return -EINVAL;
3539 if (sector != (sector_t)sector)
3540 return -EINVAL;
3541
3542 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3543 rdev->raid_disk >= 0)
3544 return -EBUSY;
3545
3546 if (rdev->mddev->persistent) {
3547 if (rdev->mddev->major_version == 0)
3548 return -EINVAL;
3549 if ((sector > rdev->sb_start &&
3550 sector - rdev->sb_start > S16_MAX) ||
3551 (sector < rdev->sb_start &&
3552 rdev->sb_start - sector > -S16_MIN))
3553 return -EINVAL;
3554 rdev->ppl.offset = sector - rdev->sb_start;
3555 } else if (!rdev->mddev->external) {
3556 return -EBUSY;
3557 }
3558 rdev->ppl.sector = sector;
3559 return len;
3560}
3561
3562static struct rdev_sysfs_entry rdev_ppl_sector =
3563__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3564
3565static ssize_t
3566ppl_size_show(struct md_rdev *rdev, char *page)
3567{
3568 return sprintf(page, "%u\n", rdev->ppl.size);
3569}
3570
3571static ssize_t
3572ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3573{
3574 unsigned int size;
3575
3576 if (kstrtouint(buf, 10, &size) < 0)
3577 return -EINVAL;
3578
3579 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3580 rdev->raid_disk >= 0)
3581 return -EBUSY;
3582
3583 if (rdev->mddev->persistent) {
3584 if (rdev->mddev->major_version == 0)
3585 return -EINVAL;
3586 if (size > U16_MAX)
3587 return -EINVAL;
3588 } else if (!rdev->mddev->external) {
3589 return -EBUSY;
3590 }
3591 rdev->ppl.size = size;
3592 return len;
3593}
3594
3595static struct rdev_sysfs_entry rdev_ppl_size =
3596__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3597
NeilBrown86e6ffd2005-11-08 21:39:24 -08003598static struct attribute *rdev_default_attrs[] = {
3599 &rdev_state.attr,
NeilBrown4dbcdc72006-01-06 00:20:52 -08003600 &rdev_errors.attr,
NeilBrown014236d2006-01-06 00:20:55 -08003601 &rdev_slot.attr,
NeilBrown93c8cad2006-01-06 00:20:56 -08003602 &rdev_offset.attr,
NeilBrownc6563a82012-05-21 09:27:00 +10003603 &rdev_new_offset.attr,
NeilBrown83303b62006-01-06 00:21:06 -08003604 &rdev_size.attr,
Dan Williams06e3c812009-12-12 21:17:12 -07003605 &rdev_recovery_start.attr,
NeilBrown16c791a2011-07-28 11:31:47 +10003606 &rdev_bad_blocks.attr,
3607 &rdev_unack_bad_blocks.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003608 &rdev_ppl_sector.attr,
3609 &rdev_ppl_size.attr,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003610 NULL,
3611};
Greg Kroah-Hartman1745e852022-01-06 11:03:35 +01003612ATTRIBUTE_GROUPS(rdev_default);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003613static ssize_t
3614rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3615{
3616 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003617 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003618
3619 if (!entry->show)
3620 return -EIO;
NeilBrown758bfc82014-12-15 12:56:59 +11003621 if (!rdev->mddev)
Marcos Paulo de Souza168b3052019-06-14 15:41:06 -07003622 return -ENODEV;
NeilBrown758bfc82014-12-15 12:56:59 +11003623 return entry->show(rdev, page);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003624}
3625
3626static ssize_t
3627rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3628 const char *page, size_t length)
3629{
3630 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003631 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown27c529b2008-03-04 14:29:33 -08003632 ssize_t rv;
NeilBrownfd01b882011-10-11 16:47:53 +11003633 struct mddev *mddev = rdev->mddev;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003634
3635 if (!entry->store)
3636 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07003637 if (!capable(CAP_SYS_ADMIN))
3638 return -EACCES;
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003639 rv = mddev ? mddev_lock(mddev) : -ENODEV;
NeilBrownca388052008-02-06 01:39:55 -08003640 if (!rv) {
NeilBrown27c529b2008-03-04 14:29:33 -08003641 if (rdev->mddev == NULL)
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003642 rv = -ENODEV;
NeilBrown27c529b2008-03-04 14:29:33 -08003643 else
3644 rv = entry->store(rdev, page, length);
Dan Williams6a518302008-04-30 00:52:28 -07003645 mddev_unlock(mddev);
NeilBrownca388052008-02-06 01:39:55 -08003646 }
3647 return rv;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003648}
3649
3650static void rdev_free(struct kobject *ko)
3651{
NeilBrown3cb03002011-10-11 16:45:26 +11003652 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003653 kfree(rdev);
3654}
Emese Revfy52cf25d2010-01-19 02:58:23 +01003655static const struct sysfs_ops rdev_sysfs_ops = {
NeilBrown86e6ffd2005-11-08 21:39:24 -08003656 .show = rdev_attr_show,
3657 .store = rdev_attr_store,
3658};
3659static struct kobj_type rdev_ktype = {
3660 .release = rdev_free,
3661 .sysfs_ops = &rdev_sysfs_ops,
Greg Kroah-Hartman1745e852022-01-06 11:03:35 +01003662 .default_groups = rdev_default_groups,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003663};
3664
NeilBrown3cb03002011-10-11 16:45:26 +11003665int md_rdev_init(struct md_rdev *rdev)
NeilBrowne8bb9a82010-06-01 19:37:26 +10003666{
3667 rdev->desc_nr = -1;
3668 rdev->saved_raid_disk = -1;
3669 rdev->raid_disk = -1;
3670 rdev->flags = 0;
3671 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10003672 rdev->new_data_offset = 0;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003673 rdev->sb_events = 0;
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02003674 rdev->last_read_error = 0;
NeilBrown2699b672011-07-28 11:31:47 +10003675 rdev->sb_loaded = 0;
3676 rdev->bb_page = NULL;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003677 atomic_set(&rdev->nr_pending, 0);
3678 atomic_set(&rdev->read_errors, 0);
3679 atomic_set(&rdev->corrected_errors, 0);
3680
3681 INIT_LIST_HEAD(&rdev->same_set);
3682 init_waitqueue_head(&rdev->blocked_wait);
NeilBrown2230dfe2011-07-28 11:31:46 +10003683
3684 /* Add space to store bad block list.
3685 * This reserves the space even on arrays where it cannot
3686 * be used - I wonder if that matters
3687 */
Vishal Vermafc974ee2015-12-24 19:20:34 -07003688 return badblocks_init(&rdev->badblocks, 0);
NeilBrowne8bb9a82010-06-01 19:37:26 +10003689}
3690EXPORT_SYMBOL_GPL(md_rdev_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003691/*
3692 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3693 *
3694 * mark the device faulty if:
3695 *
3696 * - the device is nonexistent (zero size)
3697 * - the device has no valid superblock
3698 *
3699 * a faulty rdev _never_ has rdev->sb set.
3700 */
NeilBrown3cb03002011-10-11 16:45:26 +11003701static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003702{
3703 char b[BDEVNAME_SIZE];
3704 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11003705 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706 sector_t size;
3707
NeilBrown9ffae0c2006-01-06 00:20:32 -08003708 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
NeilBrown9d487392016-11-02 14:16:49 +11003709 if (!rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711
NeilBrown2230dfe2011-07-28 11:31:46 +10003712 err = md_rdev_init(rdev);
3713 if (err)
3714 goto abort_free;
3715 err = alloc_disk_sb(rdev);
3716 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717 goto abort_free;
3718
NeilBrownc5d79ad2008-02-06 01:39:54 -08003719 err = lock_rdev(rdev, newdev, super_format == -2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720 if (err)
3721 goto abort_free;
3722
Greg Kroah-Hartmanf9cb0742007-12-17 23:05:35 -07003723 kobject_init(&rdev->kobj, &rdev_ktype);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003724
Christoph Hellwig0fe80342021-10-18 12:11:06 +02003725 size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726 if (!size) {
NeilBrown9d487392016-11-02 14:16:49 +11003727 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003728 bdevname(rdev->bdev,b));
3729 err = -EINVAL;
3730 goto abort_free;
3731 }
3732
3733 if (super_format >= 0) {
3734 err = super_types[super_format].
3735 load_super(rdev, NULL, super_minor);
3736 if (err == -EINVAL) {
NeilBrown9d487392016-11-02 14:16:49 +11003737 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
NeilBrowndf968c42007-07-17 04:06:11 -07003738 bdevname(rdev->bdev,b),
NeilBrown9d487392016-11-02 14:16:49 +11003739 super_format, super_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740 goto abort_free;
3741 }
3742 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11003743 pr_warn("md: could not read %s's sb, not importing!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 bdevname(rdev->bdev,b));
3745 goto abort_free;
3746 }
3747 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003748
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749 return rdev;
3750
3751abort_free:
NeilBrown2699b672011-07-28 11:31:47 +10003752 if (rdev->bdev)
3753 unlock_rdev(rdev);
NeilBrown545c8792012-05-22 13:54:30 +10003754 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 kfree(rdev);
3756 return ERR_PTR(err);
3757}
3758
3759/*
3760 * Check a full RAID array for plausibility
3761 */
3762
Yufen Yu6a5cb532019-10-16 16:00:03 +08003763static int analyze_sbs(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764{
3765 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11003766 struct md_rdev *rdev, *freshest, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767 char b[BDEVNAME_SIZE];
3768
3769 freshest = NULL;
NeilBrowndafb20f2012-03-19 12:46:39 +11003770 rdev_for_each_safe(rdev, tmp, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771 switch (super_types[mddev->major_version].
3772 load_super(rdev, freshest, mddev->minor_version)) {
3773 case 1:
3774 freshest = rdev;
3775 break;
3776 case 0:
3777 break;
3778 default:
NeilBrown9d487392016-11-02 14:16:49 +11003779 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003781 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782 }
3783
Yufen Yu6a5cb532019-10-16 16:00:03 +08003784 /* Cannot find a valid fresh disk */
3785 if (!freshest) {
3786 pr_warn("md: cannot find a valid disk\n");
3787 return -EINVAL;
3788 }
3789
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790 super_types[mddev->major_version].
3791 validate_super(mddev, freshest);
3792
3793 i = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11003794 rdev_for_each_safe(rdev, tmp, mddev) {
NeilBrown233fca32010-04-14 17:02:09 +10003795 if (mddev->max_disks &&
3796 (rdev->desc_nr >= mddev->max_disks ||
3797 i > mddev->max_disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11003798 pr_warn("md: %s: %s: only %d devices permitted\n",
3799 mdname(mddev), bdevname(rdev->bdev, b),
3800 mddev->max_disks);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003801 md_kick_rdev_from_array(rdev);
NeilBrownde01dfa2009-02-06 18:02:46 +11003802 continue;
3803 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003804 if (rdev != freshest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805 if (super_types[mddev->major_version].
3806 validate_super(mddev, rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003807 pr_warn("md: kicking non-fresh %s from array!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003809 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003810 continue;
3811 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003812 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813 if (mddev->level == LEVEL_MULTIPATH) {
3814 rdev->desc_nr = i++;
3815 rdev->raid_disk = rdev->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08003816 set_bit(In_sync, &rdev->flags);
Shaohua Lif2076e72015-10-08 21:54:12 -07003817 } else if (rdev->raid_disk >=
3818 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3819 !test_bit(Journal, &rdev->flags)) {
NeilBrowna778b732007-05-23 13:58:10 -07003820 rdev->raid_disk = -1;
3821 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822 }
3823 }
Yufen Yu6a5cb532019-10-16 16:00:03 +08003824
3825 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826}
3827
NeilBrown72e02072009-12-14 12:49:55 +11003828/* Read a fixed-point number.
3829 * Numbers in sysfs attributes should be in "standard" units where
3830 * possible, so time should be in seconds.
NeilBrownf72ffdd2014-09-30 14:23:59 +10003831 * However we internally use a much smaller unit such as
NeilBrown72e02072009-12-14 12:49:55 +11003832 * milliseconds or jiffies.
3833 * This function takes a decimal number with a possible fractional
3834 * component, and produces an integer which is the result of
 3835 * multiplying that number by 10^'scale',
3836 * all without any floating-point arithmetic.
3837 */
3838int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3839{
3840 unsigned long result = 0;
3841 long decimals = -1;
3842 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3843 if (*cp == '.')
3844 decimals = 0;
3845 else if (decimals < scale) {
3846 unsigned int value;
3847 value = *cp - '0';
3848 result = result * 10 + value;
3849 if (decimals >= 0)
3850 decimals++;
3851 }
3852 cp++;
3853 }
3854 if (*cp == '\n')
3855 cp++;
3856 if (*cp)
3857 return -EINVAL;
3858 if (decimals < 0)
3859 decimals = 0;
Andy Shevchenkocf891602019-07-23 23:41:55 +03003860 *res = result * int_pow(10, scale - decimals);
NeilBrown72e02072009-12-14 12:49:55 +11003861 return 0;
3862}
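/*
 * Example (illustrative only): strict_strtoul_scaled("5.3", &res, 3)
 * stores 5300 in res (5.3 seconds expressed in milliseconds), while
 * "0.001" with the same scale yields 1.
 */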
3863
NeilBrowneae17012005-11-08 21:39:23 -08003864static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003865safe_delay_show(struct mddev *mddev, char *page)
NeilBrown16f17b32006-06-26 00:27:37 -07003866{
3867 int msec = (mddev->safemode_delay*1000)/HZ;
3868 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3869}
3870static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003871safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
NeilBrown16f17b32006-06-26 00:27:37 -07003872{
NeilBrown16f17b32006-06-26 00:27:37 -07003873 unsigned long msec;
Dan Williams97ce0a72008-09-24 22:48:19 -07003874
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003875 if (mddev_is_clustered(mddev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003876 pr_warn("md: Safemode is disabled for clustered mode\n");
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003877 return -EINVAL;
3878 }
3879
NeilBrown72e02072009-12-14 12:49:55 +11003880 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
NeilBrown16f17b32006-06-26 00:27:37 -07003881 return -EINVAL;
NeilBrown16f17b32006-06-26 00:27:37 -07003882 if (msec == 0)
3883 mddev->safemode_delay = 0;
3884 else {
NeilBrown19052c02008-08-05 15:54:13 +10003885 unsigned long old_delay = mddev->safemode_delay;
NeilBrown1b30e662014-12-15 12:57:00 +11003886 unsigned long new_delay = (msec*HZ)/1000;
3887
3888 if (new_delay == 0)
3889 new_delay = 1;
3890 mddev->safemode_delay = new_delay;
3891 if (new_delay < old_delay || old_delay == 0)
3892 mod_timer(&mddev->safemode_timer, jiffies+1);
NeilBrown16f17b32006-06-26 00:27:37 -07003893 }
3894 return len;
3895}
3896static struct md_sysfs_entry md_safe_delay =
NeilBrown80ca3a42006-07-10 04:44:18 -07003897__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
NeilBrown16f17b32006-06-26 00:27:37 -07003898
3899static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003900level_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08003901{
NeilBrown36d091f2014-12-15 12:56:58 +11003902 struct md_personality *p;
3903 int ret;
3904 spin_lock(&mddev->lock);
3905 p = mddev->pers;
NeilBrownd9d166c2006-01-06 00:20:51 -08003906 if (p)
NeilBrown36d091f2014-12-15 12:56:58 +11003907 ret = sprintf(page, "%s\n", p->name);
NeilBrownd9d166c2006-01-06 00:20:51 -08003908 else if (mddev->clevel[0])
NeilBrown36d091f2014-12-15 12:56:58 +11003909 ret = sprintf(page, "%s\n", mddev->clevel);
NeilBrownd9d166c2006-01-06 00:20:51 -08003910 else if (mddev->level != LEVEL_NONE)
NeilBrown36d091f2014-12-15 12:56:58 +11003911 ret = sprintf(page, "%d\n", mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08003912 else
NeilBrown36d091f2014-12-15 12:56:58 +11003913 ret = 0;
3914 spin_unlock(&mddev->lock);
3915 return ret;
NeilBrowneae17012005-11-08 21:39:23 -08003916}
3917
NeilBrownd9d166c2006-01-06 00:20:51 -08003918static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003919level_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd9d166c2006-01-06 00:20:51 -08003920{
Dan Williamsf2859af2010-05-02 10:04:16 -07003921 char clevel[16];
NeilBrown67918752014-12-15 12:57:01 +11003922 ssize_t rv;
3923 size_t slen = len;
NeilBrowndb721d32014-12-15 12:56:58 +11003924 struct md_personality *pers, *oldpers;
Dan Williamsf2859af2010-05-02 10:04:16 -07003925 long level;
NeilBrowndb721d32014-12-15 12:56:58 +11003926 void *priv, *oldpriv;
NeilBrown3cb03002011-10-11 16:45:26 +11003927 struct md_rdev *rdev;
NeilBrown245f46c2009-03-31 14:39:39 +11003928
NeilBrown67918752014-12-15 12:57:01 +11003929 if (slen == 0 || slen >= sizeof(clevel))
3930 return -EINVAL;
3931
3932 rv = mddev_lock(mddev);
3933 if (rv)
NeilBrown245f46c2009-03-31 14:39:39 +11003934 return rv;
NeilBrown67918752014-12-15 12:57:01 +11003935
3936 if (mddev->pers == NULL) {
3937 strncpy(mddev->clevel, buf, slen);
3938 if (mddev->clevel[slen-1] == '\n')
3939 slen--;
3940 mddev->clevel[slen] = 0;
3941 mddev->level = LEVEL_NONE;
3942 rv = len;
3943 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003944 }
NeilBrown67918752014-12-15 12:57:01 +11003945 rv = -EROFS;
NeilBrownbd8839e2014-05-28 13:39:21 +10003946 if (mddev->ro)
NeilBrown67918752014-12-15 12:57:01 +11003947 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003948
3949 /* request to change the personality. Need to ensure:
3950 * - array is not engaged in resync/recovery/reshape
3951 * - old personality can be suspended
3952 * - new personality will access other array.
3953 */
3954
NeilBrown67918752014-12-15 12:57:01 +11003955 rv = -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10003956 if (mddev->sync_thread ||
NeilBrownf851b602014-12-11 10:02:10 +11003957 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
NeilBrownbb4f1e92010-08-08 21:18:03 +10003958 mddev->reshape_position != MaxSector ||
3959 mddev->sysfs_active)
NeilBrown67918752014-12-15 12:57:01 +11003960 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003961
NeilBrown67918752014-12-15 12:57:01 +11003962 rv = -EINVAL;
NeilBrown245f46c2009-03-31 14:39:39 +11003963 if (!mddev->pers->quiesce) {
NeilBrown9d487392016-11-02 14:16:49 +11003964 pr_warn("md: %s: %s does not support online personality change\n",
3965 mdname(mddev), mddev->pers->name);
NeilBrown67918752014-12-15 12:57:01 +11003966 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003967 }
3968
3969 /* Now find the new personality */
NeilBrown67918752014-12-15 12:57:01 +11003970 strncpy(clevel, buf, slen);
3971 if (clevel[slen-1] == '\n')
3972 slen--;
3973 clevel[slen] = 0;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003974 if (kstrtol(clevel, 10, &level))
Dan Williamsf2859af2010-05-02 10:04:16 -07003975 level = LEVEL_NONE;
NeilBrown245f46c2009-03-31 14:39:39 +11003976
Dan Williamsf2859af2010-05-02 10:04:16 -07003977 if (request_module("md-%s", clevel) != 0)
3978 request_module("md-level-%s", clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003979 spin_lock(&pers_lock);
Dan Williamsf2859af2010-05-02 10:04:16 -07003980 pers = find_pers(level, clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003981 if (!pers || !try_module_get(pers->owner)) {
3982 spin_unlock(&pers_lock);
NeilBrown9d487392016-11-02 14:16:49 +11003983 pr_warn("md: personality %s not loaded\n", clevel);
NeilBrown67918752014-12-15 12:57:01 +11003984 rv = -EINVAL;
3985 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003986 }
3987 spin_unlock(&pers_lock);
3988
3989 if (pers == mddev->pers) {
3990 /* Nothing to do! */
3991 module_put(pers->owner);
NeilBrown67918752014-12-15 12:57:01 +11003992 rv = len;
3993 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003994 }
3995 if (!pers->takeover) {
3996 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11003997 pr_warn("md: %s: %s does not support personality takeover\n",
3998 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11003999 rv = -EINVAL;
4000 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004001 }
4002
NeilBrowndafb20f2012-03-19 12:46:39 +11004003 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01004004 rdev->new_raid_disk = rdev->raid_disk;
4005
NeilBrown245f46c2009-03-31 14:39:39 +11004006 /* ->takeover must set new_* and/or delta_disks
4007 * if it succeeds, and may set them when it fails.
4008 */
4009 priv = pers->takeover(mddev);
4010 if (IS_ERR(priv)) {
4011 mddev->new_level = mddev->level;
4012 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10004013 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown245f46c2009-03-31 14:39:39 +11004014 mddev->raid_disks -= mddev->delta_disks;
4015 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10004016 mddev->reshape_backwards = 0;
NeilBrown245f46c2009-03-31 14:39:39 +11004017 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004018 pr_warn("md: %s: %s would not accept array\n",
4019 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004020 rv = PTR_ERR(priv);
4021 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004022 }
4023
4024 /* Looks like we have a winner */
4025 mddev_suspend(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11004026 mddev_detach(mddev);
NeilBrown36d091f2014-12-15 12:56:58 +11004027
4028 spin_lock(&mddev->lock);
NeilBrowndb721d32014-12-15 12:56:58 +11004029 oldpers = mddev->pers;
4030 oldpriv = mddev->private;
4031 mddev->pers = pers;
4032 mddev->private = priv;
4033 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4034 mddev->level = mddev->new_level;
4035 mddev->layout = mddev->new_layout;
4036 mddev->chunk_sectors = mddev->new_chunk_sectors;
4037 mddev->delta_disks = 0;
4038 mddev->reshape_backwards = 0;
4039 mddev->degraded = 0;
NeilBrown36d091f2014-12-15 12:56:58 +11004040 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10004041
NeilBrowndb721d32014-12-15 12:56:58 +11004042 if (oldpers->sync_request == NULL &&
Trela Maciej54071b32010-03-08 16:02:42 +11004043 mddev->external) {
4044 /* We are converting from a no-redundancy array
4045 * to a redundancy array and metadata is managed
4046 * externally so we need to be sure that writes
4047 * won't block due to a need to transition
4048 * clean->dirty
4049 * until external management is started.
4050 */
4051 mddev->in_sync = 0;
4052 mddev->safemode_delay = 0;
4053 mddev->safemode = 0;
4054 }
4055
NeilBrowndb721d32014-12-15 12:56:58 +11004056 oldpers->free(mddev, oldpriv);
4057
4058 if (oldpers->sync_request == NULL &&
4059 pers->sync_request != NULL) {
4060 /* need to add the md_redundancy_group */
4061 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11004062 pr_warn("md: cannot register extra attributes for %s\n",
4063 mdname(mddev));
NeilBrowndb721d32014-12-15 12:56:58 +11004064 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07004065 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4066 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrowndb721d32014-12-15 12:56:58 +11004067 }
4068 if (oldpers->sync_request != NULL &&
4069 pers->sync_request == NULL) {
4070 /* need to remove the md_redundancy_group */
4071 if (mddev->to_remove == NULL)
4072 mddev->to_remove = &md_redundancy_group;
4073 }
4074
Alexey Obitotskiy4cb9da72016-06-23 12:11:01 +02004075 module_put(oldpers->owner);
4076
NeilBrowndafb20f2012-03-19 12:46:39 +11004077 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004078 if (rdev->raid_disk < 0)
4079 continue;
NeilBrownbf2cb0d2011-01-14 09:14:34 +11004080 if (rdev->new_raid_disk >= mddev->raid_disks)
NeilBrowne93f68a2010-06-15 09:36:03 +01004081 rdev->new_raid_disk = -1;
4082 if (rdev->new_raid_disk == rdev->raid_disk)
4083 continue;
Namhyung Kim36fad852011-07-27 11:00:36 +10004084 sysfs_unlink_rdev(mddev, rdev);
NeilBrowne93f68a2010-06-15 09:36:03 +01004085 }
NeilBrowndafb20f2012-03-19 12:46:39 +11004086 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004087 if (rdev->raid_disk < 0)
4088 continue;
4089 if (rdev->new_raid_disk == rdev->raid_disk)
4090 continue;
4091 rdev->raid_disk = rdev->new_raid_disk;
4092 if (rdev->raid_disk < 0)
NeilBrown3a981b02009-08-03 10:59:55 +10004093 clear_bit(In_sync, &rdev->flags);
NeilBrowne93f68a2010-06-15 09:36:03 +01004094 else {
Namhyung Kim36fad852011-07-27 11:00:36 +10004095 if (sysfs_link_rdev(mddev, rdev))
NeilBrown9d487392016-11-02 14:16:49 +11004096 pr_warn("md: cannot register rd%d for %s after level change\n",
4097 rdev->raid_disk, mdname(mddev));
NeilBrown3a981b02009-08-03 10:59:55 +10004098 }
NeilBrowne93f68a2010-06-15 09:36:03 +01004099 }
4100
NeilBrowndb721d32014-12-15 12:56:58 +11004101 if (pers->sync_request == NULL) {
Trela, Maciej9af204c2010-03-08 16:02:44 +11004102 /* this is now an array without redundancy, so
4103 * it must always be in_sync
4104 */
4105 mddev->in_sync = 1;
4106 del_timer_sync(&mddev->safemode_timer);
4107 }
NeilBrown02e5f5c2013-11-14 15:16:15 +11004108 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown245f46c2009-03-31 14:39:39 +11004109 pers->run(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004110 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassow47525e52012-05-22 13:55:29 +10004111 mddev_resume(mddev);
NeilBrown830778a2014-01-14 15:17:03 +11004112 if (!mddev->thread)
4113 md_update_sb(mddev, 1);
Junxiao Bie1a86db2020-07-14 16:10:26 -07004114 sysfs_notify_dirent_safe(mddev->sysfs_level);
Guoqing Jiang54679482021-10-04 23:34:53 +08004115 md_new_event();
NeilBrown67918752014-12-15 12:57:01 +11004116 rv = len;
4117out_unlock:
4118 mddev_unlock(mddev);
NeilBrownd9d166c2006-01-06 00:20:51 -08004119 return rv;
4120}
4121
4122static struct md_sysfs_entry md_level =
NeilBrown80ca3a42006-07-10 04:44:18 -07004123__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
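/*
 * Example (hypothetical usage): "echo raid5 > /sys/block/md0/md/level"
 * requests an online conversion; it only succeeds if the raid5
 * personality's ->takeover() accepts the current geometry, with the
 * needed module loaded on demand by the request_module() calls above.
 */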
NeilBrowneae17012005-11-08 21:39:23 -08004124
NeilBrownd4dbd022006-06-26 00:27:59 -07004125static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004126layout_show(struct mddev *mddev, char *page)
NeilBrownd4dbd022006-06-26 00:27:59 -07004127{
4128 /* just a number, not meaningful for all levels */
NeilBrown08a02ec2007-05-09 02:35:38 -07004129 if (mddev->reshape_position != MaxSector &&
4130 mddev->layout != mddev->new_layout)
4131 return sprintf(page, "%d (%d)\n",
4132 mddev->new_layout, mddev->layout);
NeilBrownd4dbd022006-06-26 00:27:59 -07004133 return sprintf(page, "%d\n", mddev->layout);
4134}
4135
4136static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004137layout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd4dbd022006-06-26 00:27:59 -07004138{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004139 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004140 int err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004141
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004142 err = kstrtouint(buf, 10, &n);
4143 if (err < 0)
4144 return err;
NeilBrown67918752014-12-15 12:57:01 +11004145 err = mddev_lock(mddev);
4146 if (err)
4147 return err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004148
NeilBrownb3546032009-03-31 14:56:41 +11004149 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004150 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004151 err = -EBUSY;
4152 else if (mddev->ro)
4153 err = -EROFS;
4154 else {
4155 mddev->new_layout = n;
4156 err = mddev->pers->check_reshape(mddev);
4157 if (err)
4158 mddev->new_layout = mddev->layout;
NeilBrown597a7112009-06-18 08:47:42 +10004159 }
NeilBrownb3546032009-03-31 14:56:41 +11004160 } else {
NeilBrown08a02ec2007-05-09 02:35:38 -07004161 mddev->new_layout = n;
NeilBrownb3546032009-03-31 14:56:41 +11004162 if (mddev->reshape_position == MaxSector)
4163 mddev->layout = n;
4164 }
NeilBrown67918752014-12-15 12:57:01 +11004165 mddev_unlock(mddev);
4166 return err ?: len;
NeilBrownd4dbd022006-06-26 00:27:59 -07004167}
4168static struct md_sysfs_entry md_layout =
NeilBrown80ca3a42006-07-10 04:44:18 -07004169__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
NeilBrownd4dbd022006-06-26 00:27:59 -07004170
NeilBrowneae17012005-11-08 21:39:23 -08004171static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004172raid_disks_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08004173{
NeilBrownbb636542005-11-08 21:39:45 -08004174 if (mddev->raid_disks == 0)
4175 return 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004176 if (mddev->reshape_position != MaxSector &&
4177 mddev->delta_disks != 0)
4178 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4179 mddev->raid_disks - mddev->delta_disks);
NeilBrowneae17012005-11-08 21:39:23 -08004180 return sprintf(page, "%d\n", mddev->raid_disks);
4181}
4182
NeilBrownfd01b882011-10-11 16:47:53 +11004183static int update_raid_disks(struct mddev *mddev, int raid_disks);
NeilBrownda943b992006-01-06 00:20:54 -08004184
4185static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004186raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownda943b992006-01-06 00:20:54 -08004187{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004188 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004189 int err;
NeilBrownda943b992006-01-06 00:20:54 -08004190
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004191 err = kstrtouint(buf, 10, &n);
4192 if (err < 0)
4193 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004194
NeilBrown67918752014-12-15 12:57:01 +11004195 err = mddev_lock(mddev);
4196 if (err)
4197 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004198 if (mddev->pers)
NeilBrown67918752014-12-15 12:57:01 +11004199 err = update_raid_disks(mddev, n);
NeilBrown08a02ec2007-05-09 02:35:38 -07004200 else if (mddev->reshape_position != MaxSector) {
NeilBrownc6563a82012-05-21 09:27:00 +10004201 struct md_rdev *rdev;
NeilBrown08a02ec2007-05-09 02:35:38 -07004202 int olddisks = mddev->raid_disks - mddev->delta_disks;
NeilBrownc6563a82012-05-21 09:27:00 +10004203
NeilBrown67918752014-12-15 12:57:01 +11004204 err = -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10004205 rdev_for_each(rdev, mddev) {
4206 if (olddisks < n &&
4207 rdev->data_offset < rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004208 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004209 if (olddisks > n &&
4210 rdev->data_offset > rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004211 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004212 }
NeilBrown67918752014-12-15 12:57:01 +11004213 err = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004214 mddev->delta_disks = n - olddisks;
4215 mddev->raid_disks = n;
NeilBrown2c810cd2012-05-21 09:27:00 +10004216 mddev->reshape_backwards = (mddev->delta_disks < 0);
NeilBrown08a02ec2007-05-09 02:35:38 -07004217 } else
NeilBrownda943b992006-01-06 00:20:54 -08004218 mddev->raid_disks = n;
NeilBrown67918752014-12-15 12:57:01 +11004219out_unlock:
4220 mddev_unlock(mddev);
4221 return err ? err : len;
NeilBrownda943b992006-01-06 00:20:54 -08004222}
4223static struct md_sysfs_entry md_raid_disks =
NeilBrown80ca3a42006-07-10 04:44:18 -07004224__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
NeilBrowneae17012005-11-08 21:39:23 -08004225
NeilBrown24dd4692005-11-08 21:39:26 -08004226static ssize_t
Sebastian Parschauerec164d072020-07-28 12:01:39 +02004227uuid_show(struct mddev *mddev, char *page)
4228{
4229 return sprintf(page, "%pU\n", mddev->uuid);
4230}
4231static struct md_sysfs_entry md_uuid =
4232__ATTR(uuid, S_IRUGO, uuid_show, NULL);
4233
4234static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004235chunk_size_show(struct mddev *mddev, char *page)
NeilBrown3b343802006-01-06 00:20:47 -08004236{
NeilBrown08a02ec2007-05-09 02:35:38 -07004237 if (mddev->reshape_position != MaxSector &&
Andre Noll664e7c42009-06-18 08:45:27 +10004238 mddev->chunk_sectors != mddev->new_chunk_sectors)
4239 return sprintf(page, "%d (%d)\n",
4240 mddev->new_chunk_sectors << 9,
Andre Noll9d8f0362009-06-18 08:45:01 +10004241 mddev->chunk_sectors << 9);
4242 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
NeilBrown3b343802006-01-06 00:20:47 -08004243}
4244
4245static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004246chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown3b343802006-01-06 00:20:47 -08004247{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004248 unsigned long n;
NeilBrown67918752014-12-15 12:57:01 +11004249 int err;
NeilBrown3b343802006-01-06 00:20:47 -08004250
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004251 err = kstrtoul(buf, 10, &n);
4252 if (err < 0)
4253 return err;
NeilBrown3b343802006-01-06 00:20:47 -08004254
NeilBrown67918752014-12-15 12:57:01 +11004255 err = mddev_lock(mddev);
4256 if (err)
4257 return err;
NeilBrownb3546032009-03-31 14:56:41 +11004258 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004259 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004260 err = -EBUSY;
4261 else if (mddev->ro)
4262 err = -EROFS;
4263 else {
4264 mddev->new_chunk_sectors = n >> 9;
4265 err = mddev->pers->check_reshape(mddev);
4266 if (err)
4267 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown597a7112009-06-18 08:47:42 +10004268 }
NeilBrownb3546032009-03-31 14:56:41 +11004269 } else {
Andre Noll664e7c42009-06-18 08:45:27 +10004270 mddev->new_chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004271 if (mddev->reshape_position == MaxSector)
Andre Noll9d8f0362009-06-18 08:45:01 +10004272 mddev->chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004273 }
NeilBrown67918752014-12-15 12:57:01 +11004274 mddev_unlock(mddev);
4275 return err ?: len;
NeilBrown3b343802006-01-06 00:20:47 -08004276}
4277static struct md_sysfs_entry md_chunk_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004278__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
NeilBrown3b343802006-01-06 00:20:47 -08004279
NeilBrowna94213b2006-06-26 00:28:00 -07004280static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004281resync_start_show(struct mddev *mddev, char *page)
NeilBrowna94213b2006-06-26 00:28:00 -07004282{
NeilBrownd1a7c502009-03-31 15:24:32 +11004283 if (mddev->recovery_cp == MaxSector)
4284 return sprintf(page, "none\n");
NeilBrowna94213b2006-06-26 00:28:00 -07004285 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4286}
4287
4288static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004289resync_start_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna94213b2006-06-26 00:28:00 -07004290{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004291 unsigned long long n;
NeilBrown67918752014-12-15 12:57:01 +11004292 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004293
4294 if (cmd_match(buf, "none"))
4295 n = MaxSector;
4296 else {
4297 err = kstrtoull(buf, 10, &n);
4298 if (err < 0)
4299 return err;
4300 if (n != (sector_t)n)
4301 return -EINVAL;
4302 }
NeilBrowna94213b2006-06-26 00:28:00 -07004303
NeilBrown67918752014-12-15 12:57:01 +11004304 err = mddev_lock(mddev);
4305 if (err)
4306 return err;
NeilBrownb0986362011-05-11 15:52:21 +10004307 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrown67918752014-12-15 12:57:01 +11004308 err = -EBUSY;
NeilBrowna94213b2006-06-26 00:28:00 -07004309
NeilBrown67918752014-12-15 12:57:01 +11004310 if (!err) {
4311 mddev->recovery_cp = n;
4312 if (mddev->pers)
Shaohua Li29530792016-12-08 15:48:19 -08004313 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
NeilBrown67918752014-12-15 12:57:01 +11004314 }
4315 mddev_unlock(mddev);
4316 return err ?: len;
NeilBrowna94213b2006-06-26 00:28:00 -07004317}
4318static struct md_sysfs_entry md_resync_start =
NeilBrown750f1992014-09-30 08:53:05 +10004319__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4320 resync_start_show, resync_start_store);
NeilBrowna94213b2006-06-26 00:28:00 -07004321
NeilBrown9e653b62006-06-26 00:27:58 -07004322/*
4323 * The array state can be:
4324 *
4325 * clear
4326 * No devices, no size, no level
4327 * Equivalent to STOP_ARRAY ioctl
4328 * inactive
4329 * May have some settings, but array is not active
4330 * all IO results in error
4331 * When written, doesn't tear down array, but just stops it
4332 * suspended (not supported yet)
4333 * All IO requests will block. The array can be reconfigured.
Andre Noll910d8cb2008-03-25 21:00:53 +01004334 * Writing this, if accepted, will block until array is quiescent
NeilBrown9e653b62006-06-26 00:27:58 -07004335 * readonly
4336 * no resync can happen. no superblocks get written.
4337 * write requests fail
4338 * read-auto
4339 * like readonly, but behaves like 'clean' on a write request.
4340 *
4341 * clean - no pending writes, but otherwise active.
4342 * When written to inactive array, starts without resync
4343 * If a write request arrives then
4344 * if metadata is known, mark 'dirty' and switch to 'active'.
4345 * if not known, block and switch to write-pending
4346 * If written to an active array that has pending writes, then fails.
4347 * active
4348 * fully active: IO and resync can be happening.
4349 * When written to inactive array, starts with resync
4350 *
4351 * write-pending
4352 * clean, but writes are blocked waiting for 'active' to be written.
4353 *
4354 * active-idle
4355 * like active, but no writes have been seen for a while (100msec).
4356 *
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004357 * broken
4358 * RAID0/LINEAR-only: same as clean, but array is missing a member.
4359 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
4360 * when a member is gone, so this state will at least alert the
4361 * user that something is wrong.
NeilBrown9e653b62006-06-26 00:27:58 -07004362 */
4363enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004364 write_pending, active_idle, broken, bad_word};
Adrian Bunk05381952006-06-26 00:28:01 -07004365static char *array_states[] = {
NeilBrown9e653b62006-06-26 00:27:58 -07004366 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004367 "write-pending", "active-idle", "broken", NULL };
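/*
 * Example (illustrative only): "cat /sys/block/md0/md/array_state" prints
 * one of the strings above, and "echo readonly > array_state" requests the
 * corresponding transition; writes naming write-pending, active-idle or
 * broken are rejected.
 */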
NeilBrown9e653b62006-06-26 00:27:58 -07004368
4369static int match_word(const char *word, char **list)
4370{
4371 int n;
4372 for (n=0; list[n]; n++)
4373 if (cmd_match(word, list[n]))
4374 break;
4375 return n;
4376}
4377
4378static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004379array_state_show(struct mddev *mddev, char *page)
NeilBrown9e653b62006-06-26 00:27:58 -07004380{
4381 enum array_state st = inactive;
4382
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004383 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
NeilBrown9e653b62006-06-26 00:27:58 -07004384 switch(mddev->ro) {
4385 case 1:
4386 st = readonly;
4387 break;
4388 case 2:
4389 st = read_auto;
4390 break;
4391 case 0:
NeilBrown55cc39f2017-03-15 14:05:14 +11004392 spin_lock(&mddev->lock);
Shaohua Li29530792016-12-08 15:48:19 -08004393 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrowne6910632008-02-06 01:39:51 -08004394 st = write_pending;
Tomasz Majchrzak16f88942016-10-24 12:47:28 +02004395 else if (mddev->in_sync)
4396 st = clean;
NeilBrown9e653b62006-06-26 00:27:58 -07004397 else if (mddev->safemode)
4398 st = active_idle;
4399 else
4400 st = active;
NeilBrown55cc39f2017-03-15 14:05:14 +11004401 spin_unlock(&mddev->lock);
NeilBrown9e653b62006-06-26 00:27:58 -07004402 }
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004403
4404 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4405 st = broken;
4406 } else {
NeilBrown9e653b62006-06-26 00:27:58 -07004407 if (list_empty(&mddev->disks) &&
4408 mddev->raid_disks == 0 &&
Andre Noll58c0fed2009-03-31 14:33:13 +11004409 mddev->dev_sectors == 0)
NeilBrown9e653b62006-06-26 00:27:58 -07004410 st = clear;
4411 else
4412 st = inactive;
4413 }
4414 return sprintf(page, "%s\n", array_states[st]);
4415}
4416
NeilBrownf72ffdd2014-09-30 14:23:59 +10004417static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4418static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
NeilBrownfd01b882011-10-11 16:47:53 +11004419static int restart_array(struct mddev *mddev);
NeilBrown9e653b62006-06-26 00:27:58 -07004420
4421static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004422array_state_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown9e653b62006-06-26 00:27:58 -07004423{
NeilBrown6497709b2017-03-15 14:05:14 +11004424 int err = 0;
NeilBrown9e653b62006-06-26 00:27:58 -07004425 enum array_state st = match_word(buf, array_states);
NeilBrown67918752014-12-15 12:57:01 +11004426
4427 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4428 /* don't take reconfig_mutex when toggling between
4429 * clean and active
4430 */
4431 spin_lock(&mddev->lock);
4432 if (st == active) {
4433 restart_array(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004434 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
Tomasz Majchrzak91a6c4a2016-10-25 17:07:08 +02004435 md_wakeup_thread(mddev->thread);
NeilBrown67918752014-12-15 12:57:01 +11004436 wake_up(&mddev->sb_wait);
NeilBrown67918752014-12-15 12:57:01 +11004437 } else /* st == clean */ {
4438 restart_array(mddev);
NeilBrown6497709b2017-03-15 14:05:14 +11004439 if (!set_in_sync(mddev))
NeilBrown67918752014-12-15 12:57:01 +11004440 err = -EBUSY;
4441 }
Tomasz Majchrzak573275b2016-06-30 10:47:09 +02004442 if (!err)
4443 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown67918752014-12-15 12:57:01 +11004444 spin_unlock(&mddev->lock);
NeilBrownc008f1d2015-06-12 19:46:44 +10004445 return err ?: len;
NeilBrown67918752014-12-15 12:57:01 +11004446 }
4447 err = mddev_lock(mddev);
4448 if (err)
4449 return err;
4450 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004451 switch(st) {
4452 case bad_word:
4453 break;
4454 case clear:
4455 /* stopping an active array */
NeilBrowna05b7ea2012-07-19 15:59:18 +10004456 err = do_md_stop(mddev, 0, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004457 break;
4458 case inactive:
4459 /* stopping an active array */
NeilBrown90cf1952012-07-31 10:04:55 +10004460 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004461 err = do_md_stop(mddev, 2, NULL);
NeilBrown90cf1952012-07-31 10:04:55 +10004462 else
NeilBrowne6910632008-02-06 01:39:51 -08004463 err = 0; /* already inactive */
NeilBrown9e653b62006-06-26 00:27:58 -07004464 break;
4465 case suspended:
4466 break; /* not supported yet */
4467 case readonly:
4468 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004469 err = md_set_readonly(mddev, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004470 else {
4471 mddev->ro = 1;
NeilBrown648b6292008-04-30 00:52:30 -07004472 set_disk_ro(mddev->gendisk, 1);
NeilBrown9e653b62006-06-26 00:27:58 -07004473 err = do_md_run(mddev);
4474 }
4475 break;
4476 case read_auto:
NeilBrown9e653b62006-06-26 00:27:58 -07004477 if (mddev->pers) {
NeilBrown80268ee2008-10-13 11:55:12 +11004478 if (mddev->ro == 0)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004479 err = md_set_readonly(mddev, NULL);
NeilBrown80268ee2008-10-13 11:55:12 +11004480 else if (mddev->ro == 1)
NeilBrown648b6292008-04-30 00:52:30 -07004481 err = restart_array(mddev);
4482 if (err == 0) {
4483 mddev->ro = 2;
4484 set_disk_ro(mddev->gendisk, 0);
4485 }
NeilBrown9e653b62006-06-26 00:27:58 -07004486 } else {
4487 mddev->ro = 2;
4488 err = do_md_run(mddev);
4489 }
4490 break;
4491 case clean:
4492 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004493 err = restart_array(mddev);
4494 if (err)
4495 break;
NeilBrown85572d72014-12-15 12:56:56 +11004496 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11004497 if (!set_in_sync(mddev))
NeilBrowne6910632008-02-06 01:39:51 -08004498 err = -EBUSY;
NeilBrown85572d72014-12-15 12:56:56 +11004499 spin_unlock(&mddev->lock);
NeilBrown5bf29592009-05-07 12:50:57 +10004500 } else
4501 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004502 break;
4503 case active:
4504 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004505 err = restart_array(mddev);
4506 if (err)
4507 break;
Shaohua Li29530792016-12-08 15:48:19 -08004508 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown9e653b62006-06-26 00:27:58 -07004509 wake_up(&mddev->sb_wait);
4510 err = 0;
4511 } else {
4512 mddev->ro = 0;
NeilBrown648b6292008-04-30 00:52:30 -07004513 set_disk_ro(mddev->gendisk, 0);
NeilBrown9e653b62006-06-26 00:27:58 -07004514 err = do_md_run(mddev);
4515 }
4516 break;
4517 case write_pending:
4518 case active_idle:
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004519 case broken:
NeilBrown9e653b62006-06-26 00:27:58 -07004520 /* these cannot be set */
4521 break;
4522 }
NeilBrown67918752014-12-15 12:57:01 +11004523
4524 if (!err) {
NeilBrown1d23f172011-12-08 15:49:12 +11004525 if (mddev->hold_active == UNTIL_IOCTL)
4526 mddev->hold_active = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10004527 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10004528 }
NeilBrown67918752014-12-15 12:57:01 +11004529 mddev_unlock(mddev);
4530 return err ?: len;
NeilBrown9e653b62006-06-26 00:27:58 -07004531}
NeilBrown80ca3a42006-07-10 04:44:18 -07004532static struct md_sysfs_entry md_array_state =
NeilBrown750f1992014-09-30 08:53:05 +10004533__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
NeilBrown9e653b62006-06-26 00:27:58 -07004534
NeilBrown6d7ff7382006-01-06 00:21:16 -08004535static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004536max_corrected_read_errors_show(struct mddev *mddev, char *page) {
Robert Becker1e509152009-12-14 12:49:58 +11004537 return sprintf(page, "%d\n",
4538 atomic_read(&mddev->max_corr_read_errors));
4539}
4540
4541static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004542max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
Robert Becker1e509152009-12-14 12:49:58 +11004543{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004544 unsigned int n;
4545 int rv;
Robert Becker1e509152009-12-14 12:49:58 +11004546
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004547 rv = kstrtouint(buf, 10, &n);
4548 if (rv < 0)
4549 return rv;
4550 atomic_set(&mddev->max_corr_read_errors, n);
4551 return len;
Robert Becker1e509152009-12-14 12:49:58 +11004552}
4553
4554static struct md_sysfs_entry max_corr_read_errors =
4555__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4556 max_corrected_read_errors_store);
4557
4558static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004559null_show(struct mddev *mddev, char *page)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004560{
4561 return -EINVAL;
4562}
4563
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004564/* need to ensure rdev_delayed_delete() has completed */
4565static void flush_rdev_wq(struct mddev *mddev)
4566{
4567 struct md_rdev *rdev;
4568
4569 rcu_read_lock();
4570 rdev_for_each_rcu(rdev, mddev)
4571 if (work_pending(&rdev->del_work)) {
4572 flush_workqueue(md_rdev_misc_wq);
4573 break;
4574 }
4575 rcu_read_unlock();
4576}
4577
NeilBrown6d7ff7382006-01-06 00:21:16 -08004578static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004579new_dev_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004580{
4581 /* buf must be %d:%d\n? giving major and minor numbers */
4582 /* The new device is added to the array.
4583 * If the array has a persistent superblock, we read the
4584 * superblock to initialise info and check validity.
 4585 * Otherwise, the only checking done is that in bind_rdev_to_array,
4586 * which mainly checks size.
4587 */
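	/*
	 * Example (illustrative only): "echo 8:17 > new_dev" asks md to
	 * import the block device with major 8, minor 17 (typically
	 * /dev/sdb1) into this array.
	 */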
4588 char *e;
4589 int major = simple_strtoul(buf, &e, 10);
4590 int minor;
4591 dev_t dev;
NeilBrown3cb03002011-10-11 16:45:26 +11004592 struct md_rdev *rdev;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004593 int err;
4594
4595 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4596 return -EINVAL;
4597 minor = simple_strtoul(e+1, &e, 10);
4598 if (*e && *e != '\n')
4599 return -EINVAL;
4600 dev = MKDEV(major, minor);
4601 if (major != MAJOR(dev) ||
4602 minor != MINOR(dev))
4603 return -EOVERFLOW;
4604
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004605 flush_rdev_wq(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004606 err = mddev_lock(mddev);
4607 if (err)
4608 return err;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004609 if (mddev->persistent) {
4610 rdev = md_import_device(dev, mddev->major_version,
4611 mddev->minor_version);
4612 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11004613 struct md_rdev *rdev0
4614 = list_entry(mddev->disks.next,
4615 struct md_rdev, same_set);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004616 err = super_types[mddev->major_version]
4617 .load_super(rdev, rdev0, mddev->minor_version);
4618 if (err < 0)
4619 goto out;
4620 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08004621 } else if (mddev->external)
4622 rdev = md_import_device(dev, -2, -1);
4623 else
NeilBrown6d7ff7382006-01-06 00:21:16 -08004624 rdev = md_import_device(dev, -1, -1);
4625
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004626 if (IS_ERR(rdev)) {
4627 mddev_unlock(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004628 return PTR_ERR(rdev);
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004629 }
NeilBrown6d7ff7382006-01-06 00:21:16 -08004630 err = bind_rdev_to_array(rdev, mddev);
4631 out:
4632 if (err)
4633 export_rdev(rdev);
NeilBrown67918752014-12-15 12:57:01 +11004634 mddev_unlock(mddev);
Alexey Obitotskiy5492c462017-07-28 15:49:25 +02004635 if (!err)
Guoqing Jiang54679482021-10-04 23:34:53 +08004636 md_new_event();
NeilBrown6d7ff7382006-01-06 00:21:16 -08004637 return err ? err : len;
4638}
4639
4640static struct md_sysfs_entry md_new_device =
NeilBrown80ca3a42006-07-10 04:44:18 -07004641__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
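
/*
 * Usage sketch, assuming md0 and a component disk at major:minor 8:32
 * (both illustrative): new_dev is write-only and takes the "%d:%d"
 * pair parsed above.
 *
 *   # echo 8:32 > /sys/block/md0/md/new_dev
 */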
NeilBrown3b343802006-01-06 00:20:47 -08004642
4643static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004644bitmap_store(struct mddev *mddev, const char *buf, size_t len)
Paul Clements9b1d1da2006-10-03 01:15:49 -07004645{
4646 char *end;
4647 unsigned long chunk, end_chunk;
NeilBrown67918752014-12-15 12:57:01 +11004648 int err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004649
NeilBrown67918752014-12-15 12:57:01 +11004650 err = mddev_lock(mddev);
4651 if (err)
4652 return err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004653 if (!mddev->bitmap)
4654 goto out;
4655 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4656 while (*buf) {
4657 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4658 if (buf == end) break;
4659 if (*end == '-') { /* range */
4660 buf = end + 1;
4661 end_chunk = simple_strtoul(buf, &end, 0);
4662 if (buf == end) break;
4663 }
4664 if (*end && !isspace(*end)) break;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004665 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
André Goddard Rosae7d28602009-12-14 18:01:06 -08004666 buf = skip_spaces(end);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004667 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004668 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
Paul Clements9b1d1da2006-10-03 01:15:49 -07004669out:
NeilBrown67918752014-12-15 12:57:01 +11004670 mddev_unlock(mddev);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004671 return len;
4672}
4673
4674static struct md_sysfs_entry md_bitmap =
4675__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
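
/*
 * Usage sketch, assuming md0 (illustrative): bitmap_set_bits takes
 * whitespace-separated chunk numbers or <first>-<last> ranges, per the
 * parser above, and marks those bitmap chunks dirty.
 *
 *   # echo "0-127 256" > /sys/block/md0/md/bitmap_set_bits
 */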
4676
4677static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004678size_show(struct mddev *mddev, char *page)
NeilBrowna35b0d62006-01-06 00:20:49 -08004679{
Andre Noll58c0fed2009-03-31 14:33:13 +11004680 return sprintf(page, "%llu\n",
4681 (unsigned long long)mddev->dev_sectors / 2);
NeilBrowna35b0d62006-01-06 00:20:49 -08004682}
4683
NeilBrownfd01b882011-10-11 16:47:53 +11004684static int update_size(struct mddev *mddev, sector_t num_sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004685
4686static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004687size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna35b0d62006-01-06 00:20:49 -08004688{
4689 /* If array is inactive, we can reduce the component size, but
4690 * not increase it (except from 0).
4691 * If array is active, we can try an on-line resize
4692 */
Dan Williamsb522adc2009-03-31 15:00:31 +11004693 sector_t sectors;
4694 int err = strict_blocks_to_sectors(buf, &sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004695
Andre Noll58c0fed2009-03-31 14:33:13 +11004696 if (err < 0)
4697 return err;
NeilBrown67918752014-12-15 12:57:01 +11004698 err = mddev_lock(mddev);
4699 if (err)
4700 return err;
NeilBrowna35b0d62006-01-06 00:20:49 -08004701 if (mddev->pers) {
Andre Noll58c0fed2009-03-31 14:33:13 +11004702 err = update_size(mddev, sectors);
Xiao Ni4ba1e782016-06-12 17:18:00 +08004703 if (err == 0)
4704 md_update_sb(mddev, 1);
NeilBrowna35b0d62006-01-06 00:20:49 -08004705 } else {
Andre Noll58c0fed2009-03-31 14:33:13 +11004706 if (mddev->dev_sectors == 0 ||
4707 mddev->dev_sectors > sectors)
4708 mddev->dev_sectors = sectors;
NeilBrowna35b0d62006-01-06 00:20:49 -08004709 else
4710 err = -ENOSPC;
4711 }
NeilBrown67918752014-12-15 12:57:01 +11004712 mddev_unlock(mddev);
NeilBrowna35b0d62006-01-06 00:20:49 -08004713 return err ? err : len;
4714}
4715
4716static struct md_sysfs_entry md_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004717__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
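
/*
 * Usage sketch, assuming md0 (numbers illustrative): component_size
 * reads and writes the per-device size in 1K blocks (dev_sectors / 2
 * above); on an active array a store attempts an on-line resize.
 *
 *   $ cat /sys/block/md0/md/component_size
 *   1048576
 *   # echo 2097152 > /sys/block/md0/md/component_size
 */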
NeilBrowna35b0d62006-01-06 00:20:49 -08004718
Masanari Iida83f0d772012-10-30 00:18:08 +09004719/* Metadata version.
NeilBrowne6910632008-02-06 01:39:51 -08004720 * This is one of
4721 * 'none' for arrays with no metadata (good luck...)
4722 * 'external' for arrays with externally managed metadata,
NeilBrown8bb93aa2006-01-06 00:20:50 -08004723 * or N.M for internally known formats
4724 */
4725static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004726metadata_show(struct mddev *mddev, char *page)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004727{
4728 if (mddev->persistent)
4729 return sprintf(page, "%d.%d\n",
4730 mddev->major_version, mddev->minor_version);
NeilBrowne6910632008-02-06 01:39:51 -08004731 else if (mddev->external)
4732 return sprintf(page, "external:%s\n", mddev->metadata_type);
NeilBrown8bb93aa2006-01-06 00:20:50 -08004733 else
4734 return sprintf(page, "none\n");
4735}
4736
4737static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004738metadata_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004739{
4740 int major, minor;
4741 char *e;
NeilBrown67918752014-12-15 12:57:01 +11004742 int err;
NeilBrownea43ddd2008-10-13 11:55:11 +11004743 /* Changing the details of 'external' metadata is
4744 * always permitted. Otherwise there must be
4745 * no devices attached to the array.
4746 */
NeilBrown67918752014-12-15 12:57:01 +11004747
4748 err = mddev_lock(mddev);
4749 if (err)
4750 return err;
4751 err = -EBUSY;
NeilBrownea43ddd2008-10-13 11:55:11 +11004752 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4753 ;
4754 else if (!list_empty(&mddev->disks))
NeilBrown67918752014-12-15 12:57:01 +11004755 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004756
NeilBrown67918752014-12-15 12:57:01 +11004757 err = 0;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004758 if (cmd_match(buf, "none")) {
4759 mddev->persistent = 0;
NeilBrowne6910632008-02-06 01:39:51 -08004760 mddev->external = 0;
4761 mddev->major_version = 0;
4762 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004763 goto out_unlock;
NeilBrowne6910632008-02-06 01:39:51 -08004764 }
4765 if (strncmp(buf, "external:", 9) == 0) {
NeilBrown20a49ff2008-02-06 01:39:57 -08004766 size_t namelen = len-9;
NeilBrowne6910632008-02-06 01:39:51 -08004767 if (namelen >= sizeof(mddev->metadata_type))
4768 namelen = sizeof(mddev->metadata_type)-1;
4769 strncpy(mddev->metadata_type, buf+9, namelen);
4770 mddev->metadata_type[namelen] = 0;
4771 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4772 mddev->metadata_type[--namelen] = 0;
4773 mddev->persistent = 0;
4774 mddev->external = 1;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004775 mddev->major_version = 0;
4776 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004777 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004778 }
4779 major = simple_strtoul(buf, &e, 10);
NeilBrown67918752014-12-15 12:57:01 +11004780 err = -EINVAL;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004781 if (e==buf || *e != '.')
NeilBrown67918752014-12-15 12:57:01 +11004782 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004783 buf = e+1;
4784 minor = simple_strtoul(buf, &e, 10);
NeilBrown3f9d7b02006-12-22 01:11:41 -08004785	if (e==buf || (*e && *e != '\n'))
NeilBrown67918752014-12-15 12:57:01 +11004786 goto out_unlock;
4787 err = -ENOENT;
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07004788 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004789 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004790 mddev->major_version = major;
4791 mddev->minor_version = minor;
4792 mddev->persistent = 1;
NeilBrowne6910632008-02-06 01:39:51 -08004793 mddev->external = 0;
NeilBrown67918752014-12-15 12:57:01 +11004794 err = 0;
4795out_unlock:
4796 mddev_unlock(mddev);
4797 return err ?: len;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004798}
4799
4800static struct md_sysfs_entry md_metadata =
NeilBrown750f1992014-09-30 08:53:05 +10004801__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
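
/*
 * Usage sketch, assuming an md0 with no devices attached yet
 * (illustrative): metadata_version accepts "none", "external:<name>",
 * or a major.minor pair for the in-kernel formats.
 *
 *   # echo 1.2 > /sys/block/md0/md/metadata_version
 */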
NeilBrown8bb93aa2006-01-06 00:20:50 -08004802
NeilBrowna35b0d62006-01-06 00:20:49 -08004803static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004804action_show(struct mddev *mddev, char *page)
NeilBrown24dd4692005-11-08 21:39:26 -08004805{
NeilBrown7eec3142005-11-08 21:39:44 -08004806 char *type = "idle";
NeilBrownb7b17c92014-12-15 12:56:59 +11004807 unsigned long recovery = mddev->recovery;
4808 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
NeilBrownb6a9ce62009-05-26 09:41:17 +10004809 type = "frozen";
NeilBrownb7b17c92014-12-15 12:56:59 +11004810 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4811 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4812 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
NeilBrownccfcc3c2006-03-27 01:18:09 -08004813 type = "reshape";
NeilBrownb7b17c92014-12-15 12:56:59 +11004814 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4815 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004816 type = "resync";
NeilBrownb7b17c92014-12-15 12:56:59 +11004817 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004818 type = "check";
4819 else
4820 type = "repair";
NeilBrownb7b17c92014-12-15 12:56:59 +11004821 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004822 type = "recover";
NeilBrown985ca972015-07-06 12:26:57 +10004823 else if (mddev->reshape_position != MaxSector)
4824 type = "reshape";
NeilBrown24dd4692005-11-08 21:39:26 -08004825 }
4826 return sprintf(page, "%s\n", type);
4827}
4828
4829static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004830action_store(struct mddev *mddev, const char *page, size_t len)
NeilBrown24dd4692005-11-08 21:39:26 -08004831{
NeilBrown7eec3142005-11-08 21:39:44 -08004832 if (!mddev->pers || !mddev->pers->sync_request)
4833 return -EINVAL;
4834
4836 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004837 if (cmd_match(page, "frozen"))
4838 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4839 else
4840 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown8e8e2512015-06-12 19:51:27 +10004841 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4842 mddev_lock(mddev) == 0) {
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004843 if (work_pending(&mddev->del_work))
4844 flush_workqueue(md_misc_wq);
NeilBrown8e8e2512015-06-12 19:51:27 +10004845 if (mddev->sync_thread) {
4846 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown67918752014-12-15 12:57:01 +11004847 md_reap_sync_thread(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004848 }
NeilBrown8e8e2512015-06-12 19:51:27 +10004849 mddev_unlock(mddev);
NeilBrown7eec3142005-11-08 21:39:44 -08004850 }
NeilBrown312045e2015-12-21 11:01:21 +11004851 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004852 return -EBUSY;
Neil Brown72a23c22008-06-28 08:31:41 +10004853 else if (cmd_match(page, "resync"))
NeilBrown56ccc112015-05-28 17:53:29 +10004854 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004855 else if (cmd_match(page, "recover")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004856 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004857 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004858 } else if (cmd_match(page, "reshape")) {
NeilBrown16484bf2006-03-27 01:18:13 -08004859 int err;
4860 if (mddev->pers->start_reshape == NULL)
4861 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11004862 err = mddev_lock(mddev);
4863 if (!err) {
NeilBrown312045e2015-12-21 11:01:21 +11004864 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4865 err = -EBUSY;
4866 else {
4867 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4868 err = mddev->pers->start_reshape(mddev);
4869 }
NeilBrown67918752014-12-15 12:57:01 +11004870 mddev_unlock(mddev);
4871 }
NeilBrown16484bf2006-03-27 01:18:13 -08004872 if (err)
4873 return err;
Junxiao Bie1a86db2020-07-14 16:10:26 -07004874 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrown16484bf2006-03-27 01:18:13 -08004875 } else {
NeilBrownbce74da2006-01-06 00:20:41 -08004876 if (cmd_match(page, "check"))
NeilBrown7eec3142005-11-08 21:39:44 -08004877 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
NeilBrown2adc7d42006-05-20 14:59:57 -07004878 else if (!cmd_match(page, "repair"))
NeilBrown7eec3142005-11-08 21:39:44 -08004879 return -EINVAL;
NeilBrown56ccc112015-05-28 17:53:29 +10004880 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004881 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4882 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004883 }
NeilBrown48c26dd2012-10-11 14:19:39 +11004884 if (mddev->ro == 2) {
4885 /* A write to sync_action is enough to justify
4886 * canceling read-auto mode
4887 */
4888 mddev->ro = 0;
4889 md_wakeup_thread(mddev->sync_thread);
4890 }
NeilBrown03c902e2006-01-06 00:20:46 -08004891 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08004892 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10004893 sysfs_notify_dirent_safe(mddev->sysfs_action);
NeilBrown24dd4692005-11-08 21:39:26 -08004894 return len;
4895}
4896
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004897static struct md_sysfs_entry md_scan_mode =
NeilBrown750f1992014-09-30 08:53:05 +10004898__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
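
/*
 * Usage sketch, assuming md0 (illustrative): sync_action accepts the
 * keywords matched above -- idle, frozen, resync, recover, check,
 * repair and reshape. A scrub, for example, is started with:
 *
 *   # echo check > /sys/block/md0/md/sync_action
 */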
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004899
4900static ssize_t
4901last_sync_action_show(struct mddev *mddev, char *page)
4902{
4903 return sprintf(page, "%s\n", mddev->last_sync_action);
4904}
4905
4906static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4907
NeilBrown9d888832005-11-08 21:39:26 -08004908static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004909mismatch_cnt_show(struct mddev *mddev, char *page)
NeilBrown9d888832005-11-08 21:39:26 -08004910{
4911 return sprintf(page, "%llu\n",
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11004912 (unsigned long long)
4913 atomic64_read(&mddev->resync_mismatches));
NeilBrown9d888832005-11-08 21:39:26 -08004914}
4915
NeilBrown80ca3a42006-07-10 04:44:18 -07004916static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
NeilBrown9d888832005-11-08 21:39:26 -08004917
NeilBrown88202a02006-01-06 00:21:36 -08004918static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004919sync_min_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004920{
4921 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4922 mddev->sync_speed_min ? "local": "system");
4923}
4924
4925static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004926sync_min_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004927{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004928 unsigned int min;
4929 int rv;
4930
NeilBrown88202a02006-01-06 00:21:36 -08004931 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004932 min = 0;
4933 } else {
4934 rv = kstrtouint(buf, 10, &min);
4935 if (rv < 0)
4936 return rv;
4937 if (min == 0)
4938 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004939 }
NeilBrown88202a02006-01-06 00:21:36 -08004940 mddev->sync_speed_min = min;
4941 return len;
4942}
4943
4944static struct md_sysfs_entry md_sync_min =
4945__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4946
4947static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004948sync_max_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004949{
4950 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4951 mddev->sync_speed_max ? "local": "system");
4952}
4953
4954static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004955sync_max_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004956{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004957 unsigned int max;
4958 int rv;
4959
NeilBrown88202a02006-01-06 00:21:36 -08004960 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004961 max = 0;
4962 } else {
4963 rv = kstrtouint(buf, 10, &max);
4964 if (rv < 0)
4965 return rv;
4966 if (max == 0)
4967 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004968 }
NeilBrown88202a02006-01-06 00:21:36 -08004969 mddev->sync_speed_max = max;
4970 return len;
4971}
4972
4973static struct md_sysfs_entry md_sync_max =
4974__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
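
/*
 * Usage sketch, assuming md0 (illustrative): sync_speed_min and
 * sync_speed_max take a per-device rate in KiB/sec, or "system" to
 * fall back to the system-wide default (stored as 0 above).
 *
 *   # echo 50000 > /sys/block/md0/md/sync_speed_max
 *   $ cat /sys/block/md0/md/sync_speed_max
 *   50000 (local)
 */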
4975
Iustin Popd7f3d292007-10-16 23:30:54 -07004976static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004977degraded_show(struct mddev *mddev, char *page)
Iustin Popd7f3d292007-10-16 23:30:54 -07004978{
4979 return sprintf(page, "%d\n", mddev->degraded);
4980}
4981static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
NeilBrown88202a02006-01-06 00:21:36 -08004982
4983static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004984sync_force_parallel_show(struct mddev *mddev, char *page)
Bernd Schubert90b08712008-05-23 13:04:38 -07004985{
4986 return sprintf(page, "%d\n", mddev->parallel_resync);
4987}
4988
4989static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004990sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
Bernd Schubert90b08712008-05-23 13:04:38 -07004991{
4992 long n;
4993
Jingoo Hanb29bebd2013-06-01 16:15:16 +09004994 if (kstrtol(buf, 10, &n))
Bernd Schubert90b08712008-05-23 13:04:38 -07004995 return -EINVAL;
4996
4997 if (n != 0 && n != 1)
4998 return -EINVAL;
4999
5000 mddev->parallel_resync = n;
5001
5002 if (mddev->sync_thread)
5003 wake_up(&resync_wait);
5004
5005 return len;
5006}
5007
5008/* force parallel resync, even with shared block devices */
5009static struct md_sysfs_entry md_sync_force_parallel =
5010__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5011 sync_force_parallel_show, sync_force_parallel_store);
5012
5013static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005014sync_speed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005015{
5016 unsigned long resync, dt, db;
NeilBrownd1a7c502009-03-31 15:24:32 +11005017 if (mddev->curr_resync == 0)
5018 return sprintf(page, "none\n");
Andre Noll9687a602008-03-25 22:24:09 +01005019 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5020 dt = (jiffies - mddev->resync_mark) / HZ;
NeilBrown88202a02006-01-06 00:21:36 -08005021 if (!dt) dt++;
Andre Noll9687a602008-03-25 22:24:09 +01005022 db = resync - mddev->resync_mark_cnt;
5023 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
NeilBrown88202a02006-01-06 00:21:36 -08005024}
5025
NeilBrown80ca3a42006-07-10 04:44:18 -07005026static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
NeilBrown88202a02006-01-06 00:21:36 -08005027
5028static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005029sync_completed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005030{
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005031	unsigned long long max_sectors, resync;
NeilBrown88202a02006-01-06 00:21:36 -08005032
NeilBrownacb180b2009-04-14 16:28:34 +10005033 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5034 return sprintf(page, "none\n");
5035
NeilBrown72f36d52012-10-11 14:25:57 +11005036 if (mddev->curr_resync == 1 ||
5037 mddev->curr_resync == 2)
5038 return sprintf(page, "delayed\n");
5039
NeilBrownc804cde2012-05-21 09:28:33 +10005040 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5041 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Andre Noll58c0fed2009-03-31 14:33:13 +11005042 max_sectors = mddev->resync_max_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005043 else
Andre Noll58c0fed2009-03-31 14:33:13 +11005044 max_sectors = mddev->dev_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005045
NeilBrownacb180b2009-04-14 16:28:34 +10005046 resync = mddev->curr_resync_completed;
RĂ©mi RĂ©rolle13ae8642011-01-14 09:14:34 +11005047	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
NeilBrown88202a02006-01-06 00:21:36 -08005048}
5049
NeilBrown750f1992014-09-30 08:53:05 +10005050static struct md_sysfs_entry md_sync_completed =
5051 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
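
/*
 * Output sketch, assuming a resync in progress on md0 (numbers
 * illustrative): sync_completed reports sectors done against the
 * sector total chosen above, or "none"/"delayed".
 *
 *   $ cat /sys/block/md0/md/sync_completed
 *   1038336 / 1953519616
 */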
NeilBrown88202a02006-01-06 00:21:36 -08005052
NeilBrowne464eaf2006-03-27 01:18:14 -08005053static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005054min_sync_show(struct mddev *mddev, char *page)
Neil Brown5e96ee62008-06-28 08:31:24 +10005055{
5056 return sprintf(page, "%llu\n",
5057 (unsigned long long)mddev->resync_min);
5058}
5059static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005060min_sync_store(struct mddev *mddev, const char *buf, size_t len)
Neil Brown5e96ee62008-06-28 08:31:24 +10005061{
5062 unsigned long long min;
NeilBrown23da4222014-12-15 12:57:01 +11005063 int err;
NeilBrown23da4222014-12-15 12:57:01 +11005064
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005065 if (kstrtoull(buf, 10, &min))
Neil Brown5e96ee62008-06-28 08:31:24 +10005066 return -EINVAL;
NeilBrown23da4222014-12-15 12:57:01 +11005067
5068 spin_lock(&mddev->lock);
5069 err = -EINVAL;
Neil Brown5e96ee62008-06-28 08:31:24 +10005070 if (min > mddev->resync_max)
NeilBrown23da4222014-12-15 12:57:01 +11005071 goto out_unlock;
5072
5073 err = -EBUSY;
Neil Brown5e96ee62008-06-28 08:31:24 +10005074 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005075 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005076
NeilBrown50c37b12015-03-23 17:36:38 +11005077 /* Round down to multiple of 4K for safety */
5078 mddev->resync_min = round_down(min, 8);
NeilBrown23da4222014-12-15 12:57:01 +11005079 err = 0;
Neil Brown5e96ee62008-06-28 08:31:24 +10005080
NeilBrown23da4222014-12-15 12:57:01 +11005081out_unlock:
5082 spin_unlock(&mddev->lock);
5083 return err ?: len;
Neil Brown5e96ee62008-06-28 08:31:24 +10005084}
5085
5086static struct md_sysfs_entry md_min_sync =
5087__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5088
5089static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005090max_sync_show(struct mddev *mddev, char *page)
NeilBrownc6207272008-02-06 01:39:52 -08005091{
5092 if (mddev->resync_max == MaxSector)
5093 return sprintf(page, "max\n");
5094 else
5095 return sprintf(page, "%llu\n",
5096 (unsigned long long)mddev->resync_max);
5097}
5098static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005099max_sync_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownc6207272008-02-06 01:39:52 -08005100{
NeilBrown23da4222014-12-15 12:57:01 +11005101 int err;
5102 spin_lock(&mddev->lock);
NeilBrownc6207272008-02-06 01:39:52 -08005103 if (strncmp(buf, "max", 3) == 0)
5104 mddev->resync_max = MaxSector;
5105 else {
Neil Brown5e96ee62008-06-28 08:31:24 +10005106 unsigned long long max;
NeilBrown23da4222014-12-15 12:57:01 +11005107 int chunk;
5108
5109 err = -EINVAL;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005110 if (kstrtoull(buf, 10, &max))
NeilBrown23da4222014-12-15 12:57:01 +11005111 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005112 if (max < mddev->resync_min)
NeilBrown23da4222014-12-15 12:57:01 +11005113 goto out_unlock;
5114
5115 err = -EBUSY;
NeilBrownc6207272008-02-06 01:39:52 -08005116 if (max < mddev->resync_max &&
NeilBrown4d484a42009-08-13 10:41:50 +10005117 mddev->ro == 0 &&
NeilBrownc6207272008-02-06 01:39:52 -08005118 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005119 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005120
5121 /* Must be a multiple of chunk_size */
NeilBrown23da4222014-12-15 12:57:01 +11005122 chunk = mddev->chunk_sectors;
5123 if (chunk) {
raz ben yehuda2ac06c32009-06-16 17:01:42 +10005124 sector_t temp = max;
NeilBrown23da4222014-12-15 12:57:01 +11005125
5126 err = -EINVAL;
5127 if (sector_div(temp, chunk))
5128 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005129 }
5130 mddev->resync_max = max;
5131 }
5132 wake_up(&mddev->recovery_wait);
NeilBrown23da4222014-12-15 12:57:01 +11005133 err = 0;
5134out_unlock:
5135 spin_unlock(&mddev->lock);
5136 return err ?: len;
NeilBrownc6207272008-02-06 01:39:52 -08005137}
5138
5139static struct md_sysfs_entry md_max_sync =
5140__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
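
/*
 * Usage sketch, assuming md0 (illustrative): sync_min and sync_max
 * bound the resync window in sectors; sync_max also accepts "max" to
 * remove the upper bound.
 *
 *   # echo 0 > /sys/block/md0/md/sync_min
 *   # echo max > /sys/block/md0/md/sync_max
 */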
5141
5142static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005143suspend_lo_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005144{
5145 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5146}
5147
5148static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005149suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005150{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005151 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005152 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005153
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005154 err = kstrtoull(buf, 10, &new);
5155 if (err < 0)
5156 return err;
5157 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005158 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005159
NeilBrown67918752014-12-15 12:57:01 +11005160 err = mddev_lock(mddev);
5161 if (err)
5162 return err;
5163 err = -EINVAL;
5164 if (mddev->pers == NULL ||
5165 mddev->pers->quiesce == NULL)
5166 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005167 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005168 mddev->suspend_lo = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005169 mddev_resume(mddev);
5170
NeilBrown67918752014-12-15 12:57:01 +11005171 err = 0;
5172unlock:
5173 mddev_unlock(mddev);
5174 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005175}
5176static struct md_sysfs_entry md_suspend_lo =
5177__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5178
NeilBrowne464eaf2006-03-27 01:18:14 -08005179static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005180suspend_hi_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005181{
5182 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5183}
5184
5185static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005186suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005187{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005188 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005189 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005190
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005191 err = kstrtoull(buf, 10, &new);
5192 if (err < 0)
5193 return err;
5194 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005195 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005196
NeilBrown67918752014-12-15 12:57:01 +11005197 err = mddev_lock(mddev);
5198 if (err)
5199 return err;
5200 err = -EINVAL;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005201 if (mddev->pers == NULL)
NeilBrown67918752014-12-15 12:57:01 +11005202 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005203
5204 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005205 mddev->suspend_hi = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005206 mddev_resume(mddev);
5207
NeilBrown67918752014-12-15 12:57:01 +11005208 err = 0;
5209unlock:
5210 mddev_unlock(mddev);
5211 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005212}
5213static struct md_sysfs_entry md_suspend_hi =
5214__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
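
/*
 * Usage sketch, assuming md0 (illustrative): suspend_lo and suspend_hi
 * delimit a suspended sector range; each store above quiesces the
 * array around updating its bound.
 *
 *   # echo 0 > /sys/block/md0/md/suspend_lo
 *   # echo 8192 > /sys/block/md0/md/suspend_hi
 */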
5215
NeilBrown08a02ec2007-05-09 02:35:38 -07005216static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005217reshape_position_show(struct mddev *mddev, char *page)
NeilBrown08a02ec2007-05-09 02:35:38 -07005218{
5219 if (mddev->reshape_position != MaxSector)
5220 return sprintf(page, "%llu\n",
5221 (unsigned long long)mddev->reshape_position);
5222 strcpy(page, "none\n");
5223 return 5;
5224}
5225
5226static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005227reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown08a02ec2007-05-09 02:35:38 -07005228{
NeilBrownc6563a82012-05-21 09:27:00 +10005229 struct md_rdev *rdev;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005230 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005231 int err;
NeilBrown67918752014-12-15 12:57:01 +11005232
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005233 err = kstrtoull(buf, 10, &new);
5234 if (err < 0)
5235 return err;
5236 if (new != (sector_t)new)
NeilBrown08a02ec2007-05-09 02:35:38 -07005237 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11005238 err = mddev_lock(mddev);
5239 if (err)
5240 return err;
5241 err = -EBUSY;
5242 if (mddev->pers)
5243 goto unlock;
NeilBrown08a02ec2007-05-09 02:35:38 -07005244 mddev->reshape_position = new;
5245 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10005246 mddev->reshape_backwards = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07005247 mddev->new_level = mddev->level;
5248 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10005249 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10005250 rdev_for_each(rdev, mddev)
5251 rdev->new_data_offset = rdev->data_offset;
NeilBrown67918752014-12-15 12:57:01 +11005252 err = 0;
5253unlock:
5254 mddev_unlock(mddev);
5255 return err ?: len;
NeilBrown08a02ec2007-05-09 02:35:38 -07005256}
5257
5258static struct md_sysfs_entry md_reshape_position =
5259__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5260 reshape_position_store);
5261
Dan Williamsb522adc2009-03-31 15:00:31 +11005262static ssize_t
NeilBrown2c810cd2012-05-21 09:27:00 +10005263reshape_direction_show(struct mddev *mddev, char *page)
5264{
5265 return sprintf(page, "%s\n",
5266 mddev->reshape_backwards ? "backwards" : "forwards");
5267}
5268
5269static ssize_t
5270reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5271{
5272 int backwards = 0;
NeilBrown67918752014-12-15 12:57:01 +11005273 int err;
5274
NeilBrown2c810cd2012-05-21 09:27:00 +10005275 if (cmd_match(buf, "forwards"))
5276 backwards = 0;
5277 else if (cmd_match(buf, "backwards"))
5278 backwards = 1;
5279 else
5280 return -EINVAL;
5281 if (mddev->reshape_backwards == backwards)
5282 return len;
5283
NeilBrown67918752014-12-15 12:57:01 +11005284 err = mddev_lock(mddev);
5285 if (err)
5286 return err;
NeilBrown2c810cd2012-05-21 09:27:00 +10005287 /* check if we are allowed to change */
5288 if (mddev->delta_disks)
NeilBrown67918752014-12-15 12:57:01 +11005289 err = -EBUSY;
5290 else if (mddev->persistent &&
NeilBrown2c810cd2012-05-21 09:27:00 +10005291 mddev->major_version == 0)
NeilBrown67918752014-12-15 12:57:01 +11005292 err = -EINVAL;
5293 else
5294 mddev->reshape_backwards = backwards;
5295 mddev_unlock(mddev);
5296 return err ?: len;
NeilBrown2c810cd2012-05-21 09:27:00 +10005297}
5298
5299static struct md_sysfs_entry md_reshape_direction =
5300__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5301 reshape_direction_store);
5302
5303static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005304array_size_show(struct mddev *mddev, char *page)
Dan Williamsb522adc2009-03-31 15:00:31 +11005305{
5306 if (mddev->external_size)
5307 return sprintf(page, "%llu\n",
5308 (unsigned long long)mddev->array_sectors/2);
5309 else
5310 return sprintf(page, "default\n");
5311}
5312
5313static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005314array_size_store(struct mddev *mddev, const char *buf, size_t len)
Dan Williamsb522adc2009-03-31 15:00:31 +11005315{
5316 sector_t sectors;
NeilBrown67918752014-12-15 12:57:01 +11005317 int err;
5318
5319 err = mddev_lock(mddev);
5320 if (err)
5321 return err;
Dan Williamsb522adc2009-03-31 15:00:31 +11005322
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005323 /* cluster raid doesn't support change array_sectors */
Zhilong Liub6708832017-04-10 14:15:55 +08005324 if (mddev_is_clustered(mddev)) {
5325 mddev_unlock(mddev);
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005326 return -EINVAL;
Zhilong Liub6708832017-04-10 14:15:55 +08005327 }
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005328
Dan Williamsb522adc2009-03-31 15:00:31 +11005329 if (strncmp(buf, "default", 7) == 0) {
5330 if (mddev->pers)
5331 sectors = mddev->pers->size(mddev, 0, 0);
5332 else
5333 sectors = mddev->array_sectors;
5334
5335 mddev->external_size = 0;
5336 } else {
5337 if (strict_blocks_to_sectors(buf, &sectors) < 0)
NeilBrown67918752014-12-15 12:57:01 +11005338 err = -EINVAL;
5339 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5340 err = -E2BIG;
5341 else
5342 mddev->external_size = 1;
Dan Williamsb522adc2009-03-31 15:00:31 +11005343 }
5344
NeilBrown67918752014-12-15 12:57:01 +11005345 if (!err) {
5346 mddev->array_sectors = sectors;
Christoph Hellwig2c247c52020-11-16 15:57:11 +01005347 if (mddev->pers)
5348 set_capacity_and_notify(mddev->gendisk,
5349 mddev->array_sectors);
NeilBrowncbe6ef12011-02-16 13:58:38 +11005350 }
NeilBrown67918752014-12-15 12:57:01 +11005351 mddev_unlock(mddev);
5352 return err ?: len;
Dan Williamsb522adc2009-03-31 15:00:31 +11005353}
5354
5355static struct md_sysfs_entry md_array_size =
5356__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5357 array_size_store);
NeilBrowne464eaf2006-03-27 01:18:14 -08005358
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005359static ssize_t
5360consistency_policy_show(struct mddev *mddev, char *page)
5361{
5362 int ret;
5363
5364 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5365 ret = sprintf(page, "journal\n");
5366 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5367 ret = sprintf(page, "ppl\n");
5368 } else if (mddev->bitmap) {
5369 ret = sprintf(page, "bitmap\n");
5370 } else if (mddev->pers) {
5371 if (mddev->pers->sync_request)
5372 ret = sprintf(page, "resync\n");
5373 else
5374 ret = sprintf(page, "none\n");
5375 } else {
5376 ret = sprintf(page, "unknown\n");
5377 }
5378
5379 return ret;
5380}
5381
5382static ssize_t
5383consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5384{
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005385 int err = 0;
5386
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005387 if (mddev->pers) {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005388 if (mddev->pers->change_consistency_policy)
5389 err = mddev->pers->change_consistency_policy(mddev, buf);
5390 else
5391 err = -EBUSY;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005392 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5393 set_bit(MD_HAS_PPL, &mddev->flags);
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005394 } else {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005395 err = -EINVAL;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005396 }
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005397
5398 return err ? err : len;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005399}
5400
5401static struct md_sysfs_entry md_consistency_policy =
5402__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5403 consistency_policy_store);
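
/*
 * Usage sketch, assuming an inactive, externally managed md0
 * (illustrative): consistency_policy reports journal, ppl, bitmap,
 * resync, none or unknown as decided above; an external array may be
 * switched to PPL with:
 *
 *   # echo ppl > /sys/block/md0/md/consistency_policy
 */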
5404
Guoqing Jiang9a567842019-07-24 11:09:19 +02005405static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5406{
5407 return sprintf(page, "%d\n", mddev->fail_last_dev);
5408}
5409
5410/*
5411 * Setting fail_last_dev to true allows the last device to be forcibly
5412 * removed from RAID1/RAID10.
5413 */
5414static ssize_t
5415fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5416{
5417 int ret;
5418 bool value;
5419
5420 ret = kstrtobool(buf, &value);
5421 if (ret)
5422 return ret;
5423
5424 if (value != mddev->fail_last_dev)
5425 mddev->fail_last_dev = value;
5426
5427 return len;
5428}
5429static struct md_sysfs_entry md_fail_last_dev =
5430__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5431 fail_last_dev_store);
5432
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005433static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5434{
5435 if (mddev->pers == NULL || (mddev->pers->level != 1))
5436 return sprintf(page, "n/a\n");
5437 else
5438 return sprintf(page, "%d\n", mddev->serialize_policy);
5439}
5440
5441/*
5442 * Setting serialize_policy to true enforces that write I/O is not
5443 * reordered for raid1.
5444 */
5445static ssize_t
5446serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5447{
5448 int err;
5449 bool value;
5450
5451 err = kstrtobool(buf, &value);
5452 if (err)
5453 return err;
5454
5455 if (value == mddev->serialize_policy)
5456 return len;
5457
5458 err = mddev_lock(mddev);
5459 if (err)
5460 return err;
5461 if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5462 pr_err("md: serialize_policy is only effective for raid1\n");
5463 err = -EINVAL;
5464 goto unlock;
5465 }
5466
5467 mddev_suspend(mddev);
5468 if (value)
5469 mddev_create_serial_pool(mddev, NULL, true);
5470 else
5471 mddev_destroy_serial_pool(mddev, NULL, true);
5472 mddev->serialize_policy = value;
5473 mddev_resume(mddev);
5474unlock:
5475 mddev_unlock(mddev);
5476 return err ?: len;
5477}
5478
5479static struct md_sysfs_entry md_serialize_policy =
5480__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5481 serialize_policy_store);
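
/*
 * Usage sketch, assuming a raid1 array at md0 (illustrative):
 * serialize_policy takes a boolean and, per the check above, only
 * applies to raid1.
 *
 *   # echo 1 > /sys/block/md0/md/serialize_policy
 */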
5482
NeilBrowneae17012005-11-08 21:39:23 -08005484static struct attribute *md_default_attrs[] = {
5485 &md_level.attr,
NeilBrownd4dbd022006-06-26 00:27:59 -07005486 &md_layout.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005487 &md_raid_disks.attr,
Sebastian Parschauerec164d072020-07-28 12:01:39 +02005488 &md_uuid.attr,
NeilBrown3b343802006-01-06 00:20:47 -08005489 &md_chunk_size.attr,
NeilBrowna35b0d62006-01-06 00:20:49 -08005490 &md_size.attr,
NeilBrowna94213b2006-06-26 00:28:00 -07005491 &md_resync_start.attr,
NeilBrown8bb93aa2006-01-06 00:20:50 -08005492 &md_metadata.attr,
NeilBrown6d7ff7382006-01-06 00:21:16 -08005493 &md_new_device.attr,
NeilBrown16f17b32006-06-26 00:27:37 -07005494 &md_safe_delay.attr,
NeilBrown9e653b62006-06-26 00:27:58 -07005495 &md_array_state.attr,
NeilBrown08a02ec2007-05-09 02:35:38 -07005496 &md_reshape_position.attr,
NeilBrown2c810cd2012-05-21 09:27:00 +10005497 &md_reshape_direction.attr,
Dan Williamsb522adc2009-03-31 15:00:31 +11005498 &md_array_size.attr,
Robert Becker1e509152009-12-14 12:49:58 +11005499 &max_corr_read_errors.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005500 &md_consistency_policy.attr,
Guoqing Jiang9a567842019-07-24 11:09:19 +02005501 &md_fail_last_dev.attr,
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005502 &md_serialize_policy.attr,
NeilBrown411036f2005-11-08 21:39:40 -08005503 NULL,
5504};
5505
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005506static const struct attribute_group md_default_group = {
5507 .attrs = md_default_attrs,
5508};
5509
NeilBrown411036f2005-11-08 21:39:40 -08005510static struct attribute *md_redundancy_attrs[] = {
NeilBrown24dd4692005-11-08 21:39:26 -08005511 &md_scan_mode.attr,
Jonathan Brassowc4a39552013-06-25 01:23:59 -05005512 &md_last_scan_mode.attr,
NeilBrown9d888832005-11-08 21:39:26 -08005513 &md_mismatches.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005514 &md_sync_min.attr,
5515 &md_sync_max.attr,
5516 &md_sync_speed.attr,
Bernd Schubert90b08712008-05-23 13:04:38 -07005517 &md_sync_force_parallel.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005518 &md_sync_completed.attr,
Neil Brown5e96ee62008-06-28 08:31:24 +10005519 &md_min_sync.attr,
NeilBrownc6207272008-02-06 01:39:52 -08005520 &md_max_sync.attr,
NeilBrowne464eaf2006-03-27 01:18:14 -08005521 &md_suspend_lo.attr,
5522 &md_suspend_hi.attr,
Paul Clements9b1d1da2006-10-03 01:15:49 -07005523 &md_bitmap.attr,
Iustin Popd7f3d292007-10-16 23:30:54 -07005524 &md_degraded.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005525 NULL,
5526};
Rikard Falkebornc32dc042021-05-29 12:30:49 +02005527static const struct attribute_group md_redundancy_group = {
NeilBrown411036f2005-11-08 21:39:40 -08005528 .name = NULL,
5529 .attrs = md_redundancy_attrs,
5530};
5531
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005532static const struct attribute_group *md_attr_groups[] = {
5533 &md_default_group,
5534 &md_bitmap_group,
5535 NULL,
5536};
5537
NeilBrowneae17012005-11-08 21:39:23 -08005538static ssize_t
5539md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5540{
5541 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005542 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005543 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005544
5545 if (!entry->show)
5546 return -EIO;
NeilBrownaf8a2432011-12-08 15:49:46 +11005547 spin_lock(&all_mddevs_lock);
5548 if (list_empty(&mddev->all_mddevs)) {
5549 spin_unlock(&all_mddevs_lock);
5550 return -EBUSY;
5551 }
5552 mddev_get(mddev);
5553 spin_unlock(&all_mddevs_lock);
5554
NeilBrownb7b17c92014-12-15 12:56:59 +11005555 rv = entry->show(mddev, page);
NeilBrownaf8a2432011-12-08 15:49:46 +11005556 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005557 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005558}
5559
5560static ssize_t
5561md_attr_store(struct kobject *kobj, struct attribute *attr,
5562 const char *page, size_t length)
5563{
5564 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005565 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005566 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005567
5568 if (!entry->store)
5569 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07005570 if (!capable(CAP_SYS_ADMIN))
5571 return -EACCES;
NeilBrownaf8a2432011-12-08 15:49:46 +11005572 spin_lock(&all_mddevs_lock);
5573 if (list_empty(&mddev->all_mddevs)) {
5574 spin_unlock(&all_mddevs_lock);
5575 return -EBUSY;
5576 }
5577 mddev_get(mddev);
5578 spin_unlock(&all_mddevs_lock);
NeilBrown67918752014-12-15 12:57:01 +11005579 rv = entry->store(mddev, page, length);
NeilBrownaf8a2432011-12-08 15:49:46 +11005580 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005581 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005582}
5583
5584static void md_free(struct kobject *ko)
5585{
NeilBrownfd01b882011-10-11 16:47:53 +11005586 struct mddev *mddev = container_of(ko, struct mddev, kobj);
NeilBrowna21d1502009-01-09 08:31:09 +11005587
5588 if (mddev->sysfs_state)
5589 sysfs_put(mddev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07005590 if (mddev->sysfs_level)
5591 sysfs_put(mddev->sysfs_level);
5592
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005593 if (mddev->gendisk) {
Bart Van Assched8115c352018-02-28 10:15:29 -08005594 del_gendisk(mddev->gendisk);
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005595 blk_cleanup_disk(mddev->gendisk);
5596 }
NeilBrown4ad23a972017-03-15 14:05:14 +11005597 percpu_ref_exit(&mddev->writes_pending);
NeilBrowna21d1502009-01-09 08:31:09 +11005598
Kent Overstreet28dec872018-06-07 20:52:54 -04005599 bioset_exit(&mddev->bio_set);
5600 bioset_exit(&mddev->sync_set);
Guoqing Jiangdaee2022021-06-03 17:21:06 +08005601 if (mddev->level != 1 && mddev->level != 10)
5602 bioset_exit(&mddev->io_acct_set);
NeilBrowneae17012005-11-08 21:39:23 -08005603 kfree(mddev);
5604}
5605
Emese Revfy52cf25d2010-01-19 02:58:23 +01005606static const struct sysfs_ops md_sysfs_ops = {
NeilBrowneae17012005-11-08 21:39:23 -08005607 .show = md_attr_show,
5608 .store = md_attr_store,
5609};
5610static struct kobj_type md_ktype = {
5611 .release = md_free,
5612 .sysfs_ops = &md_sysfs_ops,
Christoph Hellwig51238e7f2021-09-01 13:38:31 +02005613 .default_groups = md_attr_groups,
NeilBrowneae17012005-11-08 21:39:23 -08005614};
5615
Linus Torvalds1da177e2005-04-16 15:20:36 -07005616int mdp_major = 0;
5617
Dan Williams5fd3a172009-03-04 00:57:25 -07005618static void mddev_delayed_delete(struct work_struct *ws)
5619{
NeilBrownfd01b882011-10-11 16:47:53 +11005620 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Dan Williams5fd3a172009-03-04 00:57:25 -07005621
Dan Williams5fd3a172009-03-04 00:57:25 -07005622 kobject_del(&mddev->kobj);
5623 kobject_put(&mddev->kobj);
5624}
5625
NeilBrown4ad23a972017-03-15 14:05:14 +11005626static void no_op(struct percpu_ref *r) {}
5627
NeilBrowna415c0f2017-06-05 16:05:13 +10005628int mddev_init_writes_pending(struct mddev *mddev)
5629{
5630 if (mddev->writes_pending.percpu_count_ptr)
5631 return 0;
Roman Gushchinddde2af2019-05-07 10:01:49 -07005632 if (percpu_ref_init(&mddev->writes_pending, no_op,
5633 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
NeilBrowna415c0f2017-06-05 16:05:13 +10005634 return -ENOMEM;
5635 /* We want to start with the refcount at zero */
5636 percpu_ref_put(&mddev->writes_pending);
5637 return 0;
5638}
5639EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5640
NeilBrownefeb53c2009-01-09 08:31:10 +11005641static int md_alloc(dev_t dev, char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642{
NeilBrown039b7222017-04-12 16:26:13 +10005643 /*
5644 * If dev is zero, name is the name of a device to allocate with
5645 * an arbitrary minor number. It will be "md_???"
5646 * If dev is non-zero it must be a device number with a MAJOR of
5647 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5648 * the device is being created by opening a node in /dev.
5649 * If "name" is not NULL, the device is being created by
5650 * writing to /sys/module/md_mod/parameters/new_array.
5651 */
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005652 static DEFINE_MUTEX(disks_mutex);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005653 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005654 struct gendisk *disk;
NeilBrownefeb53c2009-01-09 08:31:10 +11005655 int partitioned;
5656 int shift;
5657 int unit;
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005658	int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005660 /*
5661 * Wait for any previous instance of this device to be completely
5662 * removed (mddev_delayed_delete).
NeilBrownd3374822009-01-09 08:31:10 +11005663 */
Tejun Heoe804ac72010-10-15 15:36:08 +02005664 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11005665
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005666 mutex_lock(&disks_mutex);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005667 mddev = mddev_alloc(dev);
5668 if (IS_ERR(mddev)) {
5669 mutex_unlock(&disks_mutex);
5670 return PTR_ERR(mddev);
5671 }
5672
5673 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5674 shift = partitioned ? MdpMinorShift : 0;
5675 unit = MINOR(mddev->unit) >> shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005676
NeilBrown039b7222017-04-12 16:26:13 +10005677 if (name && !dev) {
NeilBrownefeb53c2009-01-09 08:31:10 +11005678 /* Need to ensure that 'name' is not a duplicate.
5679 */
NeilBrownfd01b882011-10-11 16:47:53 +11005680 struct mddev *mddev2;
NeilBrownefeb53c2009-01-09 08:31:10 +11005681 spin_lock(&all_mddevs_lock);
5682
5683 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5684 if (mddev2->gendisk &&
5685 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5686 spin_unlock(&all_mddevs_lock);
Christoph Hellwig0d809b32021-04-12 10:05:30 +02005687 error = -EEXIST;
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005688 goto out_unlock_disks_mutex;
NeilBrownefeb53c2009-01-09 08:31:10 +11005689 }
5690 spin_unlock(&all_mddevs_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005691 }
NeilBrown039b7222017-04-12 16:26:13 +10005692 if (name && dev)
5693 /*
5694 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
5695 */
5696 mddev->hold_active = UNTIL_STOP;
NeilBrown8b765392009-01-09 08:31:08 +11005697
NeilBrown0909dc42009-07-01 12:27:21 +10005698 error = -ENOMEM;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005699 disk = blk_alloc_disk(NUMA_NO_NODE);
5700 if (!disk)
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005701 goto out_unlock_disks_mutex;
NeilBrown409c57f2009-03-31 14:39:39 +11005702
NeilBrownefeb53c2009-01-09 08:31:10 +11005703 disk->major = MAJOR(mddev->unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005704 disk->first_minor = unit << shift;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005705 disk->minors = 1 << shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005706 if (name)
5707 strcpy(disk->disk_name, name);
5708 else if (partitioned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005709 sprintf(disk->disk_name, "md_d%d", unit);
Greg Kroah-Hartmance7b0f462005-06-20 21:15:16 -07005710 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005711 sprintf(disk->disk_name, "md%d", unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005712 disk->fops = &md_fops;
5713 disk->private_data = mddev;
Christoph Hellwig0f1d2e02021-05-21 07:51:04 +02005714
5715 mddev->queue = disk->queue;
5716 blk_set_stacking_limits(&mddev->queue->limits);
Jens Axboe56883a72016-03-30 10:16:53 -06005717 blk_queue_write_cache(mddev->queue, true, true);
Christoph Hellwiga564e232020-07-08 14:25:41 +02005718 disk->events |= DISK_EVENT_MEDIA_CHANGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005719 mddev->gendisk = disk;
Luis Chamberlain9be68dd2021-09-01 13:38:30 +02005720 error = add_disk(disk);
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005721 if (error)
5722 goto out_cleanup_disk;
NeilBrownb0140892011-05-10 17:49:01 +10005723
Kent Overstreet28dec872018-06-07 20:52:54 -04005724 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
Christoph Hellwig7ad10692021-09-01 13:38:33 +02005725 if (error)
5726 goto out_del_gendisk;
5727
5728 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5729 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5730 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
5731 goto out_unlock_disks_mutex;
5732
5733out_del_gendisk:
5734 del_gendisk(disk);
5735out_cleanup_disk:
5736 blk_cleanup_disk(disk);
5737out_unlock_disks_mutex:
Christoph Hellwig94f3cd72021-09-01 13:38:32 +02005738 mutex_unlock(&disks_mutex);
NeilBrownd3374822009-01-09 08:31:10 +11005739 mddev_put(mddev);
NeilBrown0909dc42009-07-01 12:27:21 +10005740 return error;
NeilBrownefeb53c2009-01-09 08:31:10 +11005741}
5742
Christoph Hellwig28144f92020-10-29 15:58:34 +01005743static void md_probe(dev_t dev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005744{
Christoph Hellwig28144f92020-10-29 15:58:34 +01005745 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5746 return;
NeilBrown78b63502017-04-12 16:26:13 +10005747 if (create_on_open)
5748 md_alloc(dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005749}
5750
Kees Cooke4dca7b2017-10-17 19:04:42 -07005751static int add_named_array(const char *val, const struct kernel_param *kp)
NeilBrownefeb53c2009-01-09 08:31:10 +11005752{
NeilBrown039b7222017-04-12 16:26:13 +10005753 /*
5754 * val must be "md_*" or "mdNNN".
5755 * For "md_*" we allocate an array with a large free minor number, and
NeilBrownefeb53c2009-01-09 08:31:10 +11005756 * set the name to val. val must not already be an active name.
NeilBrown039b7222017-04-12 16:26:13 +10005757 * For "mdNNN" we allocate an array with the minor number NNN
5758 * which must not already be in use.
NeilBrownefeb53c2009-01-09 08:31:10 +11005759 */
5760 int len = strlen(val);
5761 char buf[DISK_NAME_LEN];
NeilBrown039b7222017-04-12 16:26:13 +10005762 unsigned long devnum;
NeilBrownefeb53c2009-01-09 08:31:10 +11005763
5764 while (len && val[len-1] == '\n')
5765 len--;
5766 if (len >= DISK_NAME_LEN)
5767 return -E2BIG;
5768 strlcpy(buf, val, len+1);
NeilBrown039b7222017-04-12 16:26:13 +10005769 if (strncmp(buf, "md_", 3) == 0)
5770 return md_alloc(0, buf);
5771 if (strncmp(buf, "md", 2) == 0 &&
5772 isdigit(buf[2]) &&
5773 kstrtoul(buf+2, 10, &devnum) == 0 &&
5774 devnum <= MINORMASK)
5775 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5776
5777 return -EINVAL;
NeilBrownefeb53c2009-01-09 08:31:10 +11005778}
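
/*
 * Usage sketch (names illustrative): this handler backs the new_array
 * module parameter described in the comment above, so arrays can be
 * created from userspace with:
 *
 *   # echo md_home > /sys/module/md_mod/parameters/new_array
 *   # echo md12 > /sys/module/md_mod/parameters/new_array
 */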
5779
Kees Cook8376d3c2017-10-16 17:01:48 -07005780static void md_safemode_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005781{
Kees Cook8376d3c2017-10-16 17:01:48 -07005782 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783
NeilBrown4ad23a972017-03-15 14:05:14 +11005784 mddev->safemode = 1;
5785 if (mddev->external)
5786 sysfs_notify_dirent_safe(mddev->sysfs_state);
5787
Linus Torvalds1da177e2005-04-16 15:20:36 -07005788 md_wakeup_thread(mddev->thread);
5789}
5790
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005791static int start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005792
NeilBrownfd01b882011-10-11 16:47:53 +11005793int md_run(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005794{
NeilBrown2604b702006-01-06 00:20:36 -08005795 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11005796 struct md_rdev *rdev;
NeilBrown84fc4b52011-10-11 16:49:58 +11005797 struct md_personality *pers;
Vishal Vermaf51d46d2021-12-21 20:06:19 +00005798 bool nowait = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005799
NeilBrowna757e642005-04-16 15:26:42 -07005800 if (list_empty(&mddev->disks))
5801		/* cannot run an array with no devices... */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005802 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005803
5804 if (mddev->pers)
5805 return -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10005806 /* Cannot run until previous stop completes properly */
5807 if (mddev->sysfs_active)
5808 return -EBUSY;
NeilBrownb6eb1272010-04-15 10:13:47 +10005809
Linus Torvalds1da177e2005-04-16 15:20:36 -07005810 /*
5811 * Analyze all RAID superblock(s)
5812 */
NeilBrown1ec4a932008-02-06 01:39:53 -08005813 if (!mddev->raid_disks) {
5814 if (!mddev->persistent)
5815 return -EINVAL;
Yufen Yu6a5cb532019-10-16 16:00:03 +08005816 err = analyze_sbs(mddev);
5817 if (err)
5818 return -EINVAL;
NeilBrown1ec4a932008-02-06 01:39:53 -08005819 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005820
NeilBrownd9d166c2006-01-06 00:20:51 -08005821 if (mddev->level != LEVEL_NONE)
5822 request_module("md-level-%d", mddev->level);
5823 else if (mddev->clevel[0])
5824 request_module("md-%s", mddev->clevel);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005825
5826 /*
5827 * Drop all container device buffers, from now on
5828 * the only valid external interface is through the md
5829 * device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830 */
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005831 mddev->has_superblocks = false;
NeilBrowndafb20f2012-03-19 12:46:39 +11005832 rdev_for_each(rdev, mddev) {
NeilBrownb2d444d2005-11-08 21:39:31 -08005833 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005834 continue;
5835 sync_blockdev(rdev->bdev);
Peter Zijlstraf98393a2007-05-06 14:49:54 -07005836 invalidate_bdev(rdev->bdev);
Christoph Hellwigd7a47832021-02-01 14:17:20 +01005837 if (mddev->ro != 1 && rdev_read_only(rdev)) {
NeilBrown97b20ef2017-04-13 08:53:48 +10005838 mddev->ro = 1;
5839 if (mddev->gendisk)
5840 set_disk_ro(mddev->gendisk, 1);
5841 }
NeilBrownf0d76d72007-07-17 04:06:12 -07005842
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005843 if (rdev->sb_page)
5844 mddev->has_superblocks = true;
5845
NeilBrownf0d76d72007-07-17 04:06:12 -07005846 /* perform some consistency tests on the device.
5847 * We don't want the data to overlap the metadata,
Andre Noll58c0fed2009-03-31 14:33:13 +11005848 * Internal Bitmap issues have been handled elsewhere.
NeilBrownf0d76d72007-07-17 04:06:12 -07005849 */
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +11005850 if (rdev->meta_bdev) {
5851 /* Nothing to check */;
5852 } else if (rdev->data_offset < rdev->sb_start) {
Andre Noll58c0fed2009-03-31 14:33:13 +11005853 if (mddev->dev_sectors &&
5854 rdev->data_offset + mddev->dev_sectors
Andre Noll0f420352008-07-11 22:02:23 +10005855 > rdev->sb_start) {
NeilBrown9d487392016-11-02 14:16:49 +11005856 pr_warn("md: %s: data overlaps metadata\n",
5857 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005858 return -EINVAL;
5859 }
5860 } else {
Andre Noll0f420352008-07-11 22:02:23 +10005861 if (rdev->sb_start + rdev->sb_size/512
NeilBrownf0d76d72007-07-17 04:06:12 -07005862 > rdev->data_offset) {
NeilBrown9d487392016-11-02 14:16:49 +11005863 pr_warn("md: %s: metadata overlaps data\n",
5864 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005865 return -EINVAL;
5866 }
5867 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005868 sysfs_notify_dirent_safe(rdev->sysfs_state);
Vishal Vermaf51d46d2021-12-21 20:06:19 +00005869 nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005870 }
5871
Vishal Vermaf51d46d2021-12-21 20:06:19 +00005872 /* Set the NOWAIT flag if all underlying devices support it */
5873 if (nowait)
5874 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
5875
Kent Overstreetafeee512018-05-20 18:25:52 -04005876 if (!bioset_initialized(&mddev->bio_set)) {
5877 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5878 if (err)
5879 return err;
Ming Lei10273172017-02-14 23:29:00 +08005880 }
Kent Overstreetafeee512018-05-20 18:25:52 -04005881 if (!bioset_initialized(&mddev->sync_set)) {
5882 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5883 if (err)
Guoqing Jiang10764812021-05-25 17:46:17 +08005884 goto exit_bio_set;
5885 }
NeilBrowna167f662010-10-26 18:31:13 +11005886
Linus Torvalds1da177e2005-04-16 15:20:36 -07005887 spin_lock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005888 pers = find_pers(mddev->level, mddev->clevel);
NeilBrown2604b702006-01-06 00:20:36 -08005889 if (!pers || !try_module_get(pers->owner)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890 spin_unlock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005891 if (mddev->level != LEVEL_NONE)
NeilBrown9d487392016-11-02 14:16:49 +11005892 pr_warn("md: personality for level %d is not loaded!\n",
5893 mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08005894 else
NeilBrown9d487392016-11-02 14:16:49 +11005895 pr_warn("md: personality for level %s is not loaded!\n",
5896 mddev->clevel);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005897 err = -EINVAL;
5898 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005900 spin_unlock(&pers_lock);
NeilBrown34817e82009-03-31 14:39:38 +11005901 if (mddev->level != pers->level) {
5902 mddev->level = pers->level;
5903 mddev->new_level = pers->level;
5904 }
NeilBrownd9d166c2006-01-06 00:20:51 -08005905 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005906
NeilBrownf6705572006-03-27 01:18:11 -08005907 if (mddev->reshape_position != MaxSector &&
NeilBrown63c70c42006-03-27 01:18:13 -08005908 pers->start_reshape == NULL) {
NeilBrownf6705572006-03-27 01:18:11 -08005909 /* This personality cannot handle reshaping... */
NeilBrownf6705572006-03-27 01:18:11 -08005910 module_put(pers->owner);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005911 err = -EINVAL;
5912 goto abort;
NeilBrownf6705572006-03-27 01:18:11 -08005913 }
5914
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005915 if (pers->sync_request) {
5916 /* Warn if this is a potentially silly
5917 * configuration.
5918 */
5919 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11005920 struct md_rdev *rdev2;
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005921 int warned = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005922
NeilBrowndafb20f2012-03-19 12:46:39 +11005923 rdev_for_each(rdev, mddev)
5924 rdev_for_each(rdev2, mddev) {
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005925 if (rdev < rdev2 &&
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02005926 rdev->bdev->bd_disk ==
5927 rdev2->bdev->bd_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11005928 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5929 mdname(mddev),
5930 bdevname(rdev->bdev,b),
5931 bdevname(rdev2->bdev,b2));
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005932 warned = 1;
5933 }
5934 }
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005935
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005936 if (warned)
NeilBrown9d487392016-11-02 14:16:49 +11005937 pr_warn("True protection against single-disk failure might be compromised.\n");
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005938 }
5939
NeilBrown657390d2005-08-26 18:34:16 -07005940 mddev->recovery = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11005941 /* may be over-ridden by personality */
5942 mddev->resync_max_sectors = mddev->dev_sectors;
5943
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005944 mddev->ok_start_degraded = start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005945
NeilBrown0f9552b52009-12-30 12:08:50 +11005946 if (start_readonly && mddev->ro == 0)
NeilBrownf91de922005-11-08 21:39:36 -08005947 mddev->ro = 2; /* read-only, but switch on first write */
5948
NeilBrown36d091f2014-12-15 12:56:58 +11005949 err = pers->run(mddev);
Andre Noll13e53df2008-03-26 00:07:03 +01005950 if (err)
NeilBrown9d487392016-11-02 14:16:49 +11005951 pr_warn("md: pers->run() failed ...\n");
NeilBrown36d091f2014-12-15 12:56:58 +11005952 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
NeilBrown9d487392016-11-02 14:16:49 +11005953 WARN_ONCE(!mddev->external_size,
5954 "%s: default size too small, but 'external_size' not in effect?\n",
5955 __func__);
5956 pr_warn("md: invalid array_size %llu > default size %llu\n",
5957 (unsigned long long)mddev->array_sectors / 2,
5958 (unsigned long long)pers->size(mddev, 0, 0) / 2);
Dan Williamsb522adc2009-03-31 15:00:31 +11005959 err = -EINVAL;
Dan Williamsb522adc2009-03-31 15:00:31 +11005960 }
NeilBrown36d091f2014-12-15 12:56:58 +11005961 if (err == 0 && pers->sync_request &&
NeilBrownef99bf42012-05-22 13:55:08 +10005962 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005963 struct bitmap *bitmap;
5964
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07005965 bitmap = md_bitmap_create(mddev, -1);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005966 if (IS_ERR(bitmap)) {
5967 err = PTR_ERR(bitmap);
NeilBrown9d487392016-11-02 14:16:49 +11005968 pr_warn("%s: failed to create bitmap (%d)\n",
5969 mdname(mddev), err);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005970 } else
5971 mddev->bitmap = bitmap;
5972
NeilBrownb15c2e52006-01-06 00:20:16 -08005973 }
Guoqing Jiangd4945492019-06-14 17:10:39 +08005974 if (err)
5975 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005976
5977 if (mddev->bitmap_info.max_write_behind > 0) {
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005978 bool create_pool = false;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005979
5980 rdev_for_each(rdev, mddev) {
5981 if (test_bit(WriteMostly, &rdev->flags) &&
Guoqing Jiang404659c2019-12-23 10:48:53 +01005982 rdev_init_serial(rdev))
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005983 create_pool = true;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005984 }
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01005985 if (create_pool && mddev->serial_info_pool == NULL) {
Guoqing Jiang404659c2019-12-23 10:48:53 +01005986 mddev->serial_info_pool =
5987 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
5988 sizeof(struct serial_info));
5989 if (!mddev->serial_info_pool) {
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005990 err = -ENOMEM;
Guoqing Jiangd4945492019-06-14 17:10:39 +08005991 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08005992 }
5993 }
5994 }
5995
NeilBrown5c675f82014-12-15 12:56:56 +11005996 if (mddev->queue) {
Shaohua Libb086a82016-09-30 09:45:40 -07005997 bool nonrot = true;
5998
5999 rdev_for_each(rdev, mddev) {
6000 if (rdev->raid_disk >= 0 &&
6001 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6002 nonrot = false;
6003 break;
6004 }
6005 }
6006 if (mddev->degraded)
6007 nonrot = false;
6008 if (nonrot)
Bart Van Assche8b904b52018-03-07 17:10:10 -08006009 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
Shaohua Libb086a82016-09-30 09:45:40 -07006010 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08006011 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
Guoqing Jiang10764812021-05-25 17:46:17 +08006012 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
NeilBrown5c675f82014-12-15 12:56:56 +11006013 }
NeilBrown36d091f2014-12-15 12:56:58 +11006014 if (pers->sync_request) {
NeilBrown00bcb4a2010-06-01 19:37:23 +10006015 if (mddev->kobj.sd &&
6016 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11006017 pr_warn("md: cannot register extra attributes for %s\n",
6018 mdname(mddev));
NeilBrown00bcb4a2010-06-01 19:37:23 +10006019 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07006020 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6021 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrown5e55e2f2007-03-26 21:32:14 -08006022 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
NeilBrownfd9d49c2005-11-08 21:39:42 -08006023 mddev->ro = 0;
6024
Robert Becker1e509152009-12-14 12:49:58 +11006025 atomic_set(&mddev->max_corr_read_errors,
6026 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006027 mddev->safemode = 0;
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006028 if (mddev_is_clustered(mddev))
6029 mddev->safemode_delay = 0;
6030 else
Zhao Heming7c9d5c52020-07-21 02:08:52 +08006031 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006032 mddev->in_sync = 1;
NeilBrown0ca69882011-01-14 09:14:33 +11006033 smp_wmb();
NeilBrown36d091f2014-12-15 12:56:58 +11006034 spin_lock(&mddev->lock);
6035 mddev->pers = pers;
NeilBrown36d091f2014-12-15 12:56:58 +11006036 spin_unlock(&mddev->lock);
NeilBrowndafb20f2012-03-19 12:46:39 +11006037 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006038 if (rdev->raid_disk >= 0)
Yufen Yue5b521e2019-06-14 15:41:07 -07006039 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006040
NeilBrowna4a3d262015-07-17 11:57:30 +10006041 if (mddev->degraded && !mddev->ro)
6042 /* This ensures that recovering status is reported immediately
6043 * via sysfs - until a lack of spares is confirmed.
6044 */
6045 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006046 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf72ffdd2014-09-30 14:23:59 +10006047
Shaohua Li29530792016-12-08 15:48:19 -08006048 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07006049 md_update_sb(mddev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006050
Guoqing Jiang54679482021-10-04 23:34:53 +08006051 md_new_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006052 return 0;
Xiao Nib1261942018-01-24 12:17:38 +08006053
Guoqing Jiangd4945492019-06-14 17:10:39 +08006054bitmap_abort:
6055 mddev_detach(mddev);
6056 if (mddev->private)
6057 pers->free(mddev, mddev->private);
6058 mddev->private = NULL;
6059 module_put(pers->owner);
6060 md_bitmap_destroy(mddev);
Xiao Nib1261942018-01-24 12:17:38 +08006061abort:
NeilBrown4bc034d2019-03-29 10:46:16 -07006062 bioset_exit(&mddev->sync_set);
Guoqing Jiang10764812021-05-25 17:46:17 +08006063exit_bio_set:
6064 bioset_exit(&mddev->bio_set);
Xiao Nib1261942018-01-24 12:17:38 +08006065 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006066}
NeilBrown390ee602010-06-01 19:37:27 +10006067EXPORT_SYMBOL_GPL(md_run);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006068
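/*
 * do_md_run() wraps md_run() with the remaining bring-up steps: loading
 * the bitmap, running personality start-up work via md_start(), setting
 * the gendisk capacity and announcing the array through sysfs/uevents.
 * MD_NOT_READY is set for the duration so the array is not reported
 * ready prematurely.
 */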
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006069int do_md_run(struct mddev *mddev)
NeilBrownfe60b012010-03-29 11:10:42 +11006070{
6071 int err;
6072
NeilBrown9d4b45d2019-08-20 10:21:09 +10006073 set_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006074 err = md_run(mddev);
6075 if (err)
6076 goto out;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006077 err = md_bitmap_load(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006078 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006079 md_bitmap_destroy(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006080 goto out;
6081 }
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006082
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006083 if (mddev_is_clustered(mddev))
6084 md_allow_write(mddev);
6085
Song Liud5d885f2017-11-19 22:17:01 -08006086 /* run start up tasks that require md_thread */
6087 md_start(mddev);
6088
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006089 md_wakeup_thread(mddev->thread);
6090 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6091
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006092 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006093 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006094 mddev->changed = 1;
NeilBrownfe60b012010-03-29 11:10:42 +11006095 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006096 sysfs_notify_dirent_safe(mddev->sysfs_state);
6097 sysfs_notify_dirent_safe(mddev->sysfs_action);
Junxiao Bie1a86db2020-07-14 16:10:26 -07006098 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownfe60b012010-03-29 11:10:42 +11006099out:
NeilBrown9d4b45d2019-08-20 10:21:09 +10006100 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006101 return err;
6102}
6103
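/*
 * md_start() runs start-up work that needs the md thread, i.e. the
 * personality's ->start() hook; MD_RECOVERY_WAIT keeps resync parked
 * until that hook has finished.
 */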
Song Liud5d885f2017-11-19 22:17:01 -08006104int md_start(struct mddev *mddev)
6105{
6106 int ret = 0;
6107
6108 if (mddev->pers->start) {
6109 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6110 md_wakeup_thread(mddev->thread);
6111 ret = mddev->pers->start(mddev);
6112 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6113 md_wakeup_thread(mddev->sync_thread);
6114 }
6115 return ret;
6116}
6117EXPORT_SYMBOL_GPL(md_start);
6118
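/*
 * restart_array() switches a read-only array back to read-write, after
 * checking that it is running, that a required journal device is still
 * present and that no member device is itself read-only.
 */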
NeilBrownfd01b882011-10-11 16:47:53 +11006119static int restart_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006120{
6121 struct gendisk *disk = mddev->gendisk;
NeilBrown97b20ef2017-04-13 08:53:48 +10006122 struct md_rdev *rdev;
6123 bool has_journal = false;
6124 bool has_readonly = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006125
Andre Noll80fab1d2008-07-11 22:02:21 +10006126 /* Complain if it has no devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006127 if (list_empty(&mddev->disks))
Andre Noll80fab1d2008-07-11 22:02:21 +10006128 return -ENXIO;
6129 if (!mddev->pers)
6130 return -EINVAL;
6131 if (!mddev->ro)
6132 return -EBUSY;
Song Liu339421d2015-10-08 21:54:13 -07006133
NeilBrown97b20ef2017-04-13 08:53:48 +10006134 rcu_read_lock();
6135 rdev_for_each_rcu(rdev, mddev) {
6136 if (test_bit(Journal, &rdev->flags) &&
6137 !test_bit(Faulty, &rdev->flags))
6138 has_journal = true;
Christoph Hellwiga42e0d72021-02-01 14:17:21 +01006139 if (rdev_read_only(rdev))
NeilBrown97b20ef2017-04-13 08:53:48 +10006140 has_readonly = true;
Song Liu339421d2015-10-08 21:54:13 -07006141 }
NeilBrown97b20ef2017-04-13 08:53:48 +10006142 rcu_read_unlock();
6143 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6144 /* Don't restart rw with journal missing/faulty */
6145 return -EINVAL;
6146 if (has_readonly)
6147 return -EROFS;
Song Liu339421d2015-10-08 21:54:13 -07006148
Andre Noll80fab1d2008-07-11 22:02:21 +10006149 mddev->safemode = 0;
6150 mddev->ro = 0;
6151 set_disk_ro(disk, 0);
NeilBrown9d487392016-11-02 14:16:49 +11006152 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
Andre Noll80fab1d2008-07-11 22:02:21 +10006153 /* Kick recovery or resync if necessary */
6154 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6155 md_wakeup_thread(mddev->thread);
6156 md_wakeup_thread(mddev->sync_thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006157 sysfs_notify_dirent_safe(mddev->sysfs_state);
Andre Noll80fab1d2008-07-11 22:02:21 +10006158 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006159}
6160
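/*
 * md_clean() resets an mddev back to its initial state after a full
 * stop so the structure can be reused for a new array.
 */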
NeilBrownfd01b882011-10-11 16:47:53 +11006161static void md_clean(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006162{
6163 mddev->array_sectors = 0;
6164 mddev->external_size = 0;
6165 mddev->dev_sectors = 0;
6166 mddev->raid_disks = 0;
6167 mddev->recovery_cp = 0;
6168 mddev->resync_min = 0;
6169 mddev->resync_max = MaxSector;
6170 mddev->reshape_position = MaxSector;
6171 mddev->external = 0;
6172 mddev->persistent = 0;
6173 mddev->level = LEVEL_NONE;
6174 mddev->clevel[0] = 0;
6175 mddev->flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08006176 mddev->sb_flags = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006177 mddev->ro = 0;
6178 mddev->metadata_type[0] = 0;
6179 mddev->chunk_sectors = 0;
6180 mddev->ctime = mddev->utime = 0;
6181 mddev->layout = 0;
6182 mddev->max_disks = 0;
6183 mddev->events = 0;
NeilBrowna8707c02010-05-18 09:28:43 +10006184 mddev->can_decrease_events = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006185 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10006186 mddev->reshape_backwards = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006187 mddev->new_level = LEVEL_NONE;
6188 mddev->new_layout = 0;
6189 mddev->new_chunk_sectors = 0;
6190 mddev->curr_resync = 0;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11006191 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown6177b472010-03-29 11:37:13 +11006192 mddev->suspend_lo = mddev->suspend_hi = 0;
6193 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6194 mddev->recovery = 0;
6195 mddev->in_sync = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006196 mddev->changed = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006197 mddev->degraded = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006198 mddev->safemode = 0;
NeilBrownbd691922015-06-25 17:01:40 +10006199 mddev->private = NULL;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006200 mddev->cluster_info = NULL;
NeilBrown6177b472010-03-29 11:37:13 +11006201 mddev->bitmap_info.offset = 0;
6202 mddev->bitmap_info.default_offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10006203 mddev->bitmap_info.default_space = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006204 mddev->bitmap_info.chunksize = 0;
6205 mddev->bitmap_info.daemon_sleep = 0;
6206 mddev->bitmap_info.max_write_behind = 0;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006207 mddev->bitmap_info.nodes = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006208}
6209
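/*
 * __md_stop_writes() quiesces write-out: it freezes and reaps the sync
 * thread, flushes the bitmap, marks the superblock clean when possible
 * and tears down the serialization pool.
 */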
NeilBrownfd01b882011-10-11 16:47:53 +11006210static void __md_stop_writes(struct mddev *mddev)
NeilBrowna047e122010-03-29 12:07:53 +11006211{
NeilBrown6b6204e2013-05-09 09:48:30 +10006212 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Guoqing Jiang21e09582020-04-04 23:57:07 +02006213 if (work_pending(&mddev->del_work))
6214 flush_workqueue(md_misc_wq);
NeilBrowna047e122010-03-29 12:07:53 +11006215 if (mddev->sync_thread) {
NeilBrowna047e122010-03-29 12:07:53 +11006216 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10006217 md_reap_sync_thread(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006218 }
6219
6220 del_timer_sync(&mddev->safemode_timer);
6221
Shaohua Li034e33f2016-11-21 10:29:19 -08006222 if (mddev->pers && mddev->pers->quiesce) {
6223 mddev->pers->quiesce(mddev, 1);
6224 mddev->pers->quiesce(mddev, 0);
6225 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006226 md_bitmap_flush(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006227
NeilBrownb6d428c2013-04-24 11:42:42 +10006228 if (mddev->ro == 0 &&
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006229 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
Shaohua Li29530792016-12-08 15:48:19 -08006230 mddev->sb_flags)) {
NeilBrowna047e122010-03-29 12:07:53 +11006231 /* mark array as shutdown cleanly */
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006232 if (!mddev_is_clustered(mddev))
6233 mddev->in_sync = 1;
NeilBrowna047e122010-03-29 12:07:53 +11006234 md_update_sb(mddev, 1);
6235 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +01006236 /* disable policy to guarantee rdevs free resources for serialization */
6237 mddev->serialize_policy = 0;
6238 mddev_destroy_serial_pool(mddev, NULL, true);
NeilBrowna047e122010-03-29 12:07:53 +11006239}
NeilBrowndefad612011-01-14 09:14:33 +11006240
NeilBrownfd01b882011-10-11 16:47:53 +11006241void md_stop_writes(struct mddev *mddev)
NeilBrowndefad612011-01-14 09:14:33 +11006242{
NeilBrown29f097c2013-11-14 17:54:51 +11006243 mddev_lock_nointr(mddev);
NeilBrowndefad612011-01-14 09:14:33 +11006244 __md_stop_writes(mddev);
6245 mddev_unlock(mddev);
6246}
NeilBrown390ee602010-06-01 19:37:27 +10006247EXPORT_SYMBOL_GPL(md_stop_writes);
NeilBrowna047e122010-03-29 12:07:53 +11006248
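/*
 * mddev_detach() waits for outstanding behind-writes, quiesces the
 * personality and stops the main md thread before tear-down.
 */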
NeilBrown5aa61f42014-12-15 12:56:57 +11006249static void mddev_detach(struct mddev *mddev)
6250{
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006251 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang6b40bec2020-02-11 11:10:04 +01006252 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
NeilBrown5aa61f42014-12-15 12:56:57 +11006253 mddev->pers->quiesce(mddev, 1);
6254 mddev->pers->quiesce(mddev, 0);
6255 }
6256 md_unregister_thread(&mddev->thread);
6257 if (mddev->queue)
6258 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6259}
6260
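/*
 * __md_stop() detaches the array and releases the personality;
 * md_stop() below additionally frees the bio sets and is the variant
 * used by dm-raid.
 */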
NeilBrown5eff3c42012-11-19 10:47:48 +11006261static void __md_stop(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006262{
NeilBrown36d091f2014-12-15 12:56:58 +11006263 struct md_personality *pers = mddev->pers;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006264 md_bitmap_destroy(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11006265 mddev_detach(mddev);
NeilBrownee5d0042015-07-22 10:20:07 +10006266 /* Ensure ->event_work is done */
Guoqing Jiang21e09582020-04-04 23:57:07 +02006267 if (mddev->event_work.func)
6268 flush_workqueue(md_misc_wq);
NeilBrown36d091f2014-12-15 12:56:58 +11006269 spin_lock(&mddev->lock);
NeilBrown6177b472010-03-29 11:37:13 +11006270 mddev->pers = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006271 spin_unlock(&mddev->lock);
zhangyue07641b52021-11-16 10:35:26 +08006272 if (mddev->private)
6273 pers->free(mddev, mddev->private);
NeilBrownbd691922015-06-25 17:01:40 +10006274 mddev->private = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006275 if (pers->sync_request && mddev->to_remove == NULL)
6276 mddev->to_remove = &md_redundancy_group;
6277 module_put(pers->owner);
NeilBrowncca9cf92010-04-01 12:08:16 +11006278 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Jack Wang6aaa58c2018-10-19 16:21:31 +02006279}
6280
6281void md_stop(struct mddev *mddev)
6282{
6283 /* stop the array and free any attached data structures.
6284 * This is called from dm-raid.
6285 */
6286 __md_stop(mddev);
Kent Overstreetafeee512018-05-20 18:25:52 -04006287 bioset_exit(&mddev->bio_set);
6288 bioset_exit(&mddev->sync_set);
Guoqing Jiangdaee2022021-06-03 17:21:06 +08006289 if (mddev->level != 1 && mddev->level != 10)
6290 bioset_exit(&mddev->io_acct_set);
NeilBrown5eff3c42012-11-19 10:47:48 +11006291}
6292
NeilBrown390ee602010-06-01 19:37:27 +10006293EXPORT_SYMBOL_GPL(md_stop);
NeilBrown6177b472010-03-29 11:37:13 +11006294
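/*
 * md_set_readonly() flips an active array to read-only: it interrupts
 * any running resync, waits for pending superblock updates, then sets
 * mddev->ro and marks the gendisk read-only.
 */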
NeilBrowna05b7ea2012-07-19 15:59:18 +10006295static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
NeilBrowna4bd82d2010-03-29 13:23:10 +11006296{
6297 int err = 0;
NeilBrown30b8feb2013-11-14 15:16:17 +11006298 int did_freeze = 0;
6299
6300 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6301 did_freeze = 1;
6302 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6303 md_wakeup_thread(mddev->thread);
6304 }
NeilBrownf851b602014-12-11 10:02:10 +11006305 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006306 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006307 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006308 /* Thread might be blocked waiting for metadata update
6309 * which will now never happen */
6310 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006311
Shaohua Li29530792016-12-08 15:48:19 -08006312 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrown88724bf2015-09-24 14:00:51 +10006313 return -EBUSY;
NeilBrown30b8feb2013-11-14 15:16:17 +11006314 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006315 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6316 &mddev->recovery));
NeilBrown88724bf2015-09-24 14:00:51 +10006317 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08006318 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown30b8feb2013-11-14 15:16:17 +11006319 mddev_lock_nointr(mddev);
6320
NeilBrowna4bd82d2010-03-29 13:23:10 +11006321 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006322 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006323 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006324 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006325 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown30b8feb2013-11-14 15:16:17 +11006326 if (did_freeze) {
6327 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006328 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006329 md_wakeup_thread(mddev->thread);
6330 }
NeilBrowna4bd82d2010-03-29 13:23:10 +11006331 err = -EBUSY;
6332 goto out;
6333 }
6334 if (mddev->pers) {
NeilBrowndefad612011-01-14 09:14:33 +11006335 __md_stop_writes(mddev);
NeilBrowna4bd82d2010-03-29 13:23:10 +11006336
6337 err = -ENXIO;
6338 if (mddev->ro==1)
6339 goto out;
6340 mddev->ro = 1;
6341 set_disk_ro(mddev->gendisk, 1);
6342 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006343 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6344 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006345 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown30b8feb2013-11-14 15:16:17 +11006346 err = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006347 }
6348out:
6349 mutex_unlock(&mddev->open_mutex);
6350 return err;
6351}
6352
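/*
 * do_md_stop() stops a running array; in mode 0 it also releases the
 * bitmap file, exports all member devices and wipes the mddev.
 */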
NeilBrown9e653b62006-06-26 00:27:58 -07006353/* mode:
6354 * 0 - completely stop and disassemble array
NeilBrown9e653b62006-06-26 00:27:58 -07006355 * 2 - stop but do not disassemble array
6356 */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006357static int do_md_stop(struct mddev *mddev, int mode,
NeilBrowna05b7ea2012-07-19 15:59:18 +10006358 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006359{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360 struct gendisk *disk = mddev->gendisk;
NeilBrown3cb03002011-10-11 16:45:26 +11006361 struct md_rdev *rdev;
NeilBrown30b8feb2013-11-14 15:16:17 +11006362 int did_freeze = 0;
6363
6364 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6365 did_freeze = 1;
6366 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6367 md_wakeup_thread(mddev->thread);
6368 }
NeilBrownf851b602014-12-11 10:02:10 +11006369 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006370 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006371 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006372 /* Thread might be blocked waiting for metadata update
6373 * which will now never happen */
6374 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006375
NeilBrown30b8feb2013-11-14 15:16:17 +11006376 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006377 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6378 !test_bit(MD_RECOVERY_RUNNING,
6379 &mddev->recovery)));
NeilBrown30b8feb2013-11-14 15:16:17 +11006380 mddev_lock_nointr(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006381
NeilBrownc8c00a62009-08-10 12:50:52 +10006382 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006383 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006384 mddev->sysfs_active ||
6385 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006386 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006387 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown6e17b022010-08-07 21:41:19 +10006388 mutex_unlock(&mddev->open_mutex);
NeilBrown30b8feb2013-11-14 15:16:17 +11006389 if (did_freeze) {
6390 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006391 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006392 md_wakeup_thread(mddev->thread);
6393 }
NeilBrown260fa032013-08-27 16:44:13 +10006394 return -EBUSY;
6395 }
NeilBrown6e17b022010-08-07 21:41:19 +10006396 if (mddev->pers) {
NeilBrowna4bd82d2010-03-29 13:23:10 +11006397 if (mddev->ro)
6398 set_disk_ro(disk, 0);
NeilBrown409c57f2009-03-31 14:39:39 +11006399
NeilBrowndefad612011-01-14 09:14:33 +11006400 __md_stop_writes(mddev);
NeilBrown5eff3c42012-11-19 10:47:48 +11006401 __md_stop(mddev);
NeilBrown6177b472010-03-29 11:37:13 +11006402
NeilBrowna4bd82d2010-03-29 13:23:10 +11006403 /* tell userspace to handle 'inactive' */
NeilBrown00bcb4a2010-06-01 19:37:23 +10006404 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown0d4ca602006-12-10 02:20:44 -08006405
NeilBrowndafb20f2012-03-19 12:46:39 +11006406 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006407 if (rdev->raid_disk >= 0)
6408 sysfs_unlink_rdev(mddev, rdev);
NeilBrownc4647292009-05-07 12:51:06 +10006409
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006410 set_capacity_and_notify(disk, 0);
NeilBrown6e17b022010-08-07 21:41:19 +10006411 mutex_unlock(&mddev->open_mutex);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006412 mddev->changed = 1;
NeilBrown0d4ca602006-12-10 02:20:44 -08006413
NeilBrowna4bd82d2010-03-29 13:23:10 +11006414 if (mddev->ro)
6415 mddev->ro = 0;
NeilBrown6e17b022010-08-07 21:41:19 +10006416 } else
6417 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006418 /*
6419 * Free resources if final stop
6420 */
NeilBrown9e653b62006-06-26 00:27:58 -07006421 if (mode == 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006422 pr_info("md: %s stopped.\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006423
NeilBrownc3d97142009-12-14 12:49:52 +11006424 if (mddev->bitmap_info.file) {
NeilBrown4af1a042014-12-15 12:57:00 +11006425 struct file *f = mddev->bitmap_info.file;
6426 spin_lock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11006427 mddev->bitmap_info.file = NULL;
NeilBrown4af1a042014-12-15 12:57:00 +11006428 spin_unlock(&mddev->lock);
6429 fput(f);
NeilBrown978f9462006-02-02 14:28:05 -08006430 }
NeilBrownc3d97142009-12-14 12:49:52 +11006431 mddev->bitmap_info.offset = 0;
NeilBrown978f9462006-02-02 14:28:05 -08006432
Linus Torvalds1da177e2005-04-16 15:20:36 -07006433 export_array(mddev);
6434
NeilBrown6177b472010-03-29 11:37:13 +11006435 md_clean(mddev);
NeilBrownefeb53c2009-01-09 08:31:10 +11006436 if (mddev->hold_active == UNTIL_STOP)
6437 mddev->hold_active = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006438 }
Guoqing Jiang54679482021-10-04 23:34:53 +08006439 md_new_event();
NeilBrown00bcb4a2010-06-01 19:37:23 +10006440 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown6e17b022010-08-07 21:41:19 +10006441 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006442}
6443
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006444#ifndef MODULE
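/*
 * autorun_array() prints the members of an auto-assembled array and
 * tries to run it, stopping the array again if do_md_run() fails.
 */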
NeilBrownfd01b882011-10-11 16:47:53 +11006445static void autorun_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006446{
NeilBrown3cb03002011-10-11 16:45:26 +11006447 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006448 int err;
6449
NeilBrowna757e642005-04-16 15:26:42 -07006450 if (list_empty(&mddev->disks))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006451 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006452
NeilBrown9d487392016-11-02 14:16:49 +11006453 pr_info("md: running: ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006454
NeilBrowndafb20f2012-03-19 12:46:39 +11006455 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006456 char b[BDEVNAME_SIZE];
NeilBrown9d487392016-11-02 14:16:49 +11006457 pr_cont("<%s>", bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006458 }
NeilBrown9d487392016-11-02 14:16:49 +11006459 pr_cont("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006460
NeilBrownd710e132008-10-13 11:55:12 +11006461 err = do_md_run(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006462 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11006463 pr_warn("md: do_md_run() returned %d\n", err);
NeilBrowna05b7ea2012-07-19 15:59:18 +10006464 do_md_stop(mddev, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465 }
6466}
6467
6468/*
6469 * let's try to run arrays based on all disks that have arrived
6470 * until now. (those are in pending_raid_disks)
6471 *
6472 * the method: pick the first pending disk, collect all disks with
6473 * the same UUID, remove all from the pending list and put them into
6474 * the 'same_array' list. Then order this list based on superblock
6475 * update time (freshest comes first), kick out 'old' disks and
6476 * compare superblocks. If everything's fine then run it.
6477 *
6478 * If "unit" is allocated, then bump its reference count
6479 */
6480static void autorun_devices(int part)
6481{
NeilBrown3cb03002011-10-11 16:45:26 +11006482 struct md_rdev *rdev0, *rdev, *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11006483 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006484 char b[BDEVNAME_SIZE];
6485
NeilBrown9d487392016-11-02 14:16:49 +11006486 pr_info("md: autorun ...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487 while (!list_empty(&pending_raid_disks)) {
NeilBrowne8703fe2006-10-03 01:15:59 -07006488 int unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006489 dev_t dev;
NeilBrownad01c9e2006-03-27 01:18:07 -08006490 LIST_HEAD(candidates);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006491 rdev0 = list_entry(pending_raid_disks.next,
NeilBrown3cb03002011-10-11 16:45:26 +11006492 struct md_rdev, same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006493
NeilBrown9d487392016-11-02 14:16:49 +11006494 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006495 INIT_LIST_HEAD(&candidates);
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006496 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006497 if (super_90_load(rdev, rdev0, 0) >= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006498 pr_debug("md: adding %s ...\n",
6499 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006500 list_move(&rdev->same_set, &candidates);
6501 }
6502 /*
6503 * now we have a set of devices, with all of them having
6504 * mostly sane superblocks. It's time to allocate the
6505 * mddev.
6506 */
NeilBrowne8703fe2006-10-03 01:15:59 -07006507 if (part) {
6508 dev = MKDEV(mdp_major,
6509 rdev0->preferred_minor << MdpMinorShift);
6510 unit = MINOR(dev) >> MdpMinorShift;
6511 } else {
6512 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6513 unit = MINOR(dev);
6514 }
6515 if (rdev0->preferred_minor != unit) {
NeilBrown9d487392016-11-02 14:16:49 +11006516 pr_warn("md: unit number in %s is bad: %d\n",
6517 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006518 break;
6519 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006520
Christoph Hellwig28144f92020-10-29 15:58:34 +01006521 md_probe(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006522 mddev = mddev_find(dev);
Christoph Hellwig65aa97c2021-04-03 18:15:29 +02006523 if (!mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006524 break;
Christoph Hellwig65aa97c2021-04-03 18:15:29 +02006525
NeilBrownf72ffdd2014-09-30 14:23:59 +10006526 if (mddev_lock(mddev))
NeilBrown9d487392016-11-02 14:16:49 +11006527 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006528 else if (mddev->raid_disks || mddev->major_version
6529 || !list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11006530 pr_warn("md: %s already running, cannot run %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006531 mdname(mddev), bdevname(rdev0->bdev,b));
6532 mddev_unlock(mddev);
6533 } else {
NeilBrown9d487392016-11-02 14:16:49 +11006534 pr_debug("md: created %s\n", mdname(mddev));
NeilBrown1ec4a932008-02-06 01:39:53 -08006535 mddev->persistent = 1;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006536 rdev_for_each_list(rdev, tmp, &candidates) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006537 list_del_init(&rdev->same_set);
6538 if (bind_rdev_to_array(rdev, mddev))
6539 export_rdev(rdev);
6540 }
6541 autorun_array(mddev);
6542 mddev_unlock(mddev);
6543 }
6544 /* on success, candidates will be empty; on error
6545 * it won't be...
6546 */
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006547 rdev_for_each_list(rdev, tmp, &candidates) {
NeilBrown4b809912008-07-21 17:05:25 +10006548 list_del_init(&rdev->same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006549 export_rdev(rdev);
NeilBrown4b809912008-07-21 17:05:25 +10006550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006551 mddev_put(mddev);
6552 }
NeilBrown9d487392016-11-02 14:16:49 +11006553 pr_info("md: ... autorun DONE.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006554}
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006555#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006556
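/* GET_VERSION ioctl: report the driver's major/minor/patchlevel. */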
NeilBrownf72ffdd2014-09-30 14:23:59 +10006557static int get_version(void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006558{
6559 mdu_version_t ver;
6560
6561 ver.major = MD_MAJOR_VERSION;
6562 ver.minor = MD_MINOR_VERSION;
6563 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6564
6565 if (copy_to_user(arg, &ver, sizeof(ver)))
6566 return -EFAULT;
6567
6568 return 0;
6569}
6570
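/*
 * GET_ARRAY_INFO ioctl: fill an mdu_array_info_t with the array
 * geometry and counts of working, in-sync, failed and spare devices.
 */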
NeilBrownf72ffdd2014-09-30 14:23:59 +10006571static int get_array_info(struct mddev *mddev, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006572{
6573 mdu_array_info_t info;
NeilBrowna9f326e2009-09-23 18:06:41 +10006574 int nr,working,insync,failed,spare;
NeilBrown3cb03002011-10-11 16:45:26 +11006575 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006576
NeilBrown1ca69c42012-10-11 13:37:33 +11006577 nr = working = insync = failed = spare = 0;
6578 rcu_read_lock();
6579 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006580 nr++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006581 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006582 failed++;
6583 else {
6584 working++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006585 if (test_bit(In_sync, &rdev->flags))
NeilBrownf72ffdd2014-09-30 14:23:59 +10006586 insync++;
Song Liub347af82016-08-11 17:14:45 -07006587 else if (test_bit(Journal, &rdev->flags))
6588 /* TODO: add journal count to md_u.h */
6589 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006590 else
6591 spare++;
6592 }
6593 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006594 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006595
6596 info.major_version = mddev->major_version;
6597 info.minor_version = mddev->minor_version;
6598 info.patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006599 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006600 info.level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11006601 info.size = mddev->dev_sectors / 2;
6602 if (info.size != mddev->dev_sectors / 2) /* overflow */
NeilBrown284ae7c2006-02-03 03:03:40 -08006603 info.size = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006604 info.nr_disks = nr;
6605 info.raid_disks = mddev->raid_disks;
6606 info.md_minor = mddev->md_minor;
6607 info.not_persistent= !mddev->persistent;
6608
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006609 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006610 info.state = 0;
6611 if (mddev->in_sync)
6612 info.state = (1<<MD_SB_CLEAN);
NeilBrownc3d97142009-12-14 12:49:52 +11006613 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown9bd35922014-07-02 11:35:06 +10006614 info.state |= (1<<MD_SB_BITMAP_PRESENT);
Goldwyn Rodriguesca8895d2014-11-26 12:22:03 -06006615 if (mddev_is_clustered(mddev))
6616 info.state |= (1<<MD_SB_CLUSTERED);
NeilBrowna9f326e2009-09-23 18:06:41 +10006617 info.active_disks = insync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006618 info.working_disks = working;
6619 info.failed_disks = failed;
6620 info.spare_disks = spare;
6621
6622 info.layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10006623 info.chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006624
6625 if (copy_to_user(arg, &info, sizeof(info)))
6626 return -EFAULT;
6627
6628 return 0;
6629}
6630
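/*
 * GET_BITMAP_FILE ioctl: report the path of the external bitmap file,
 * or an empty pathname when the bitmap is internal or absent.
 */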
NeilBrownf72ffdd2014-09-30 14:23:59 +10006631static int get_bitmap_file(struct mddev *mddev, void __user * arg)
NeilBrown32a76272005-06-21 17:17:14 -07006632{
6633 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
NeilBrownf4ad3d32014-12-15 12:57:00 +11006634 char *ptr;
NeilBrown4af1a042014-12-15 12:57:00 +11006635 int err;
NeilBrown32a76272005-06-21 17:17:14 -07006636
Benjamin Randazzob6878d92015-07-25 16:36:50 +02006637 file = kzalloc(sizeof(*file), GFP_NOIO);
NeilBrown32a76272005-06-21 17:17:14 -07006638 if (!file)
NeilBrown4af1a042014-12-15 12:57:00 +11006639 return -ENOMEM;
NeilBrown32a76272005-06-21 17:17:14 -07006640
NeilBrown32a76272005-06-21 17:17:14 -07006641 err = 0;
NeilBrown4af1a042014-12-15 12:57:00 +11006642 spin_lock(&mddev->lock);
Benjamin Randazzo25eafe12015-07-25 16:36:50 +02006643 /* bitmap enabled */
6644 if (mddev->bitmap_info.file) {
6645 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6646 sizeof(file->pathname));
6647 if (IS_ERR(ptr))
6648 err = PTR_ERR(ptr);
6649 else
6650 memmove(file->pathname, ptr,
6651 sizeof(file->pathname)-(ptr-file->pathname));
6652 }
NeilBrown4af1a042014-12-15 12:57:00 +11006653 spin_unlock(&mddev->lock);
6654
6655 if (err == 0 &&
6656 copy_to_user(arg, file, sizeof(*file)))
NeilBrown32a76272005-06-21 17:17:14 -07006657 err = -EFAULT;
NeilBrown4af1a042014-12-15 12:57:00 +11006658
NeilBrown32a76272005-06-21 17:17:14 -07006659 kfree(file);
6660 return err;
6661}
6662
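/*
 * GET_DISK_INFO ioctl: look up a member device by number and report its
 * dev_t, raid slot and state flags (faulty, in-sync, journal, ...).
 */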
NeilBrownf72ffdd2014-09-30 14:23:59 +10006663static int get_disk_info(struct mddev *mddev, void __user * arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006664{
6665 mdu_disk_info_t info;
NeilBrown3cb03002011-10-11 16:45:26 +11006666 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006667
6668 if (copy_from_user(&info, arg, sizeof(info)))
6669 return -EFAULT;
6670
NeilBrown1ca69c42012-10-11 13:37:33 +11006671 rcu_read_lock();
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05006672 rdev = md_find_rdev_nr_rcu(mddev, info.number);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006673 if (rdev) {
6674 info.major = MAJOR(rdev->bdev->bd_dev);
6675 info.minor = MINOR(rdev->bdev->bd_dev);
6676 info.raid_disk = rdev->raid_disk;
6677 info.state = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08006678 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006679 info.state |= (1<<MD_DISK_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08006680 else if (test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006681 info.state |= (1<<MD_DISK_ACTIVE);
6682 info.state |= (1<<MD_DISK_SYNC);
6683 }
Shaohua Li9efdca12015-10-12 16:59:50 -07006684 if (test_bit(Journal, &rdev->flags))
Song Liubac624f2015-08-13 14:31:55 -07006685 info.state |= (1<<MD_DISK_JOURNAL);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006686 if (test_bit(WriteMostly, &rdev->flags))
6687 info.state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11006688 if (test_bit(FailFast, &rdev->flags))
6689 info.state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006690 } else {
6691 info.major = info.minor = 0;
6692 info.raid_disk = -1;
6693 info.state = (1<<MD_DISK_REMOVED);
6694 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006695 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006696
6697 if (copy_to_user(arg, &info, sizeof(info)))
6698 return -EFAULT;
6699
6700 return 0;
6701}
6702
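/*
 * md_add_new_disk() backs the ADD_NEW_DISK ioctl.  It covers three
 * cases: collecting devices while assembling a not-yet-running array,
 * hot-adding a spare or journal device to a running array (including
 * cluster-md negotiation), and adding a device described purely by the
 * ioctl arguments for 0.90/non-persistent setups.
 */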
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006703int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006704{
6705 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006706 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006707 dev_t dev = MKDEV(info->major,info->minor);
6708
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006709 if (mddev_is_clustered(mddev) &&
6710 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
NeilBrown9d487392016-11-02 14:16:49 +11006711 pr_warn("%s: Cannot add to clustered mddev.\n",
6712 mdname(mddev));
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006713 return -EINVAL;
6714 }
6715
Linus Torvalds1da177e2005-04-16 15:20:36 -07006716 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6717 return -EOVERFLOW;
6718
6719 if (!mddev->raid_disks) {
6720 int err;
6721 /* expecting a device which has a superblock */
6722 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6723 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006724 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006725 PTR_ERR(rdev));
6726 return PTR_ERR(rdev);
6727 }
6728 if (!list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11006729 struct md_rdev *rdev0
6730 = list_entry(mddev->disks.next,
6731 struct md_rdev, same_set);
NeilBrowna9f326e2009-09-23 18:06:41 +10006732 err = super_types[mddev->major_version]
Linus Torvalds1da177e2005-04-16 15:20:36 -07006733 .load_super(rdev, rdev0, mddev->minor_version);
6734 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006735 pr_warn("md: %s has different UUID to %s\n",
NeilBrownf72ffdd2014-09-30 14:23:59 +10006736 bdevname(rdev->bdev,b),
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737 bdevname(rdev0->bdev,b2));
6738 export_rdev(rdev);
6739 return -EINVAL;
6740 }
6741 }
6742 err = bind_rdev_to_array(rdev, mddev);
6743 if (err)
6744 export_rdev(rdev);
6745 return err;
6746 }
6747
6748 /*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006749 * md_add_new_disk can be used once the array is assembled
Linus Torvalds1da177e2005-04-16 15:20:36 -07006750 * to add "hot spares". They must already have a superblock
6751 * written
6752 */
6753 if (mddev->pers) {
6754 int err;
6755 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006756 pr_warn("%s: personality does not support diskops!\n",
6757 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758 return -EINVAL;
6759 }
NeilBrown7b1e35f2005-09-09 16:23:50 -07006760 if (mddev->persistent)
6761 rdev = md_import_device(dev, mddev->major_version,
6762 mddev->minor_version);
6763 else
6764 rdev = md_import_device(dev, -1, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006765 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006766 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006767 PTR_ERR(rdev));
6768 return PTR_ERR(rdev);
6769 }
NeilBrown1a855a02010-12-09 16:36:28 +11006770 /* set saved_raid_disk if appropriate */
NeilBrown41158c72005-06-21 17:17:25 -07006771 if (!mddev->persistent) {
6772 if (info->state & (1<<MD_DISK_SYNC) &&
NeilBrownbf572542011-01-12 09:03:35 +11006773 info->raid_disk < mddev->raid_disks) {
NeilBrown41158c72005-06-21 17:17:25 -07006774 rdev->raid_disk = info->raid_disk;
NeilBrownbf572542011-01-12 09:03:35 +11006775 set_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11006776 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownbf572542011-01-12 09:03:35 +11006777 } else
NeilBrown41158c72005-06-21 17:17:25 -07006778 rdev->raid_disk = -1;
NeilBrownf4667222013-12-09 12:04:56 +11006779 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrown41158c72005-06-21 17:17:25 -07006780 } else
6781 super_types[mddev->major_version].
6782 validate_super(mddev, rdev);
NeilBrownbedd86b2011-05-11 14:26:20 +10006783 if ((info->state & (1<<MD_DISK_SYNC)) &&
NeilBrownf4563092012-07-03 15:59:06 +10006784 rdev->raid_disk != info->raid_disk) {
NeilBrownbedd86b2011-05-11 14:26:20 +10006785 /* This was a hot-add request, but the events don't
6786 * match, so reject it.
6787 */
6788 export_rdev(rdev);
6789 return -EINVAL;
6790 }
6791
NeilBrownb2d444d2005-11-08 21:39:31 -08006792 clear_bit(In_sync, &rdev->flags); /* just to be sure */
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006793 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6794 set_bit(WriteMostly, &rdev->flags);
NeilBrown575a80f2009-03-31 14:33:13 +11006795 else
6796 clear_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006797 if (info->state & (1<<MD_DISK_FAILFAST))
6798 set_bit(FailFast, &rdev->flags);
6799 else
6800 clear_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006801
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006802 if (info->state & (1<<MD_DISK_JOURNAL)) {
6803 struct md_rdev *rdev2;
6804 bool has_journal = false;
6805
6806 /* make sure no existing journal disk */
6807 rdev_for_each(rdev2, mddev) {
6808 if (test_bit(Journal, &rdev2->flags)) {
6809 has_journal = true;
6810 break;
6811 }
6812 }
NeilBrown230b55f2017-10-17 14:24:09 +11006813 if (has_journal || mddev->bitmap) {
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006814 export_rdev(rdev);
6815 return -EBUSY;
6816 }
Song Liubac624f2015-08-13 14:31:55 -07006817 set_bit(Journal, &rdev->flags);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006818 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006819 /*
6820 * check whether the device shows up in other nodes
6821 */
6822 if (mddev_is_clustered(mddev)) {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006823 if (info->state & (1 << MD_DISK_CANDIDATE))
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006824 set_bit(Candidate, &rdev->flags);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006825 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006826 /* --add initiated by this node */
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006827 err = md_cluster_ops->add_new_disk(mddev, rdev);
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006828 if (err) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006829 export_rdev(rdev);
6830 return err;
6831 }
6832 }
6833 }
6834
Linus Torvalds1da177e2005-04-16 15:20:36 -07006835 rdev->raid_disk = -1;
6836 err = bind_rdev_to_array(rdev, mddev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006837
Linus Torvalds1da177e2005-04-16 15:20:36 -07006838 if (err)
6839 export_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006840
6841 if (mddev_is_clustered(mddev)) {
Guoqing Jiange566aef2016-08-12 13:42:34 +08006842 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6843 if (!err) {
6844 err = md_cluster_ops->new_disk_ack(mddev,
6845 err == 0);
6846 if (err)
6847 md_kick_rdev_from_array(rdev);
6848 }
6849 } else {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006850 if (err)
6851 md_cluster_ops->add_new_disk_cancel(mddev);
6852 else
6853 err = add_bound_rdev(rdev);
6854 }
6855
6856 } else if (!err)
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05006857 err = add_bound_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006858
Linus Torvalds1da177e2005-04-16 15:20:36 -07006859 return err;
6860 }
6861
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006862 /* otherwise, md_add_new_disk is only allowed
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863 * for major_version==0 superblocks
6864 */
6865 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006866 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006867 return -EINVAL;
6868 }
6869
6870 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6871 int err;
NeilBrownd710e132008-10-13 11:55:12 +11006872 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006873 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006874 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006875 PTR_ERR(rdev));
6876 return PTR_ERR(rdev);
6877 }
6878 rdev->desc_nr = info->number;
6879 if (info->raid_disk < mddev->raid_disks)
6880 rdev->raid_disk = info->raid_disk;
6881 else
6882 rdev->raid_disk = -1;
6883
Linus Torvalds1da177e2005-04-16 15:20:36 -07006884 if (rdev->raid_disk < mddev->raid_disks)
NeilBrownb2d444d2005-11-08 21:39:31 -08006885 if (info->state & (1<<MD_DISK_SYNC))
6886 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006887
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006888 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6889 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006890 if (info->state & (1<<MD_DISK_FAILFAST))
6891 set_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006892
Linus Torvalds1da177e2005-04-16 15:20:36 -07006893 if (!mddev->persistent) {
NeilBrown9d487392016-11-02 14:16:49 +11006894 pr_debug("md: nonpersistent superblock ...\n");
Christoph Hellwig0fe80342021-10-18 12:11:06 +02006895 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
Mike Snitzer77304d22010-11-08 14:39:12 +01006896 } else
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006897 rdev->sb_start = calc_dev_sboffset(rdev);
NeilBrown8190e752009-06-18 08:48:58 +10006898 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006899
NeilBrown2bf071b2006-01-06 00:20:55 -08006900 err = bind_rdev_to_array(rdev, mddev);
6901 if (err) {
6902 export_rdev(rdev);
6903 return err;
6904 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905 }
6906
6907 return 0;
6908}
6909
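/*
 * HOT_REMOVE_DISK ioctl: kick an inactive member out of a running
 * array; returns -EBUSY while the device still occupies a raid slot.
 */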
NeilBrownf72ffdd2014-09-30 14:23:59 +10006910static int hot_remove_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006911{
6912 char b[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006913 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914
Yufen Yuc42a0e22018-05-04 18:08:10 +08006915 if (!mddev->pers)
6916 return -ENODEV;
6917
Linus Torvalds1da177e2005-04-16 15:20:36 -07006918 rdev = find_rdev(mddev, dev);
6919 if (!rdev)
6920 return -ENXIO;
6921
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006922 if (rdev->raid_disk < 0)
6923 goto kick_rdev;
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006924
NeilBrown3ea8929d2013-04-24 11:42:41 +10006925 clear_bit(Blocked, &rdev->flags);
6926 remove_and_add_spares(mddev, rdev);
6927
Linus Torvalds1da177e2005-04-16 15:20:36 -07006928 if (rdev->raid_disk >= 0)
6929 goto busy;
6930
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006931kick_rdev:
Zhao Hemingbca5b062020-11-19 19:41:34 +08006932 if (mddev_is_clustered(mddev)) {
6933 if (md_cluster_ops->remove_disk(mddev, rdev))
6934 goto busy;
6935 }
Goldwyn Rodrigues88bcfef2015-04-14 10:44:44 -05006936
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05006937 md_kick_rdev_from_array(rdev);
Shaohua Li29530792016-12-08 15:48:19 -08006938 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11006939 if (mddev->thread)
6940 md_wakeup_thread(mddev->thread);
6941 else
6942 md_update_sb(mddev, 1);
Guoqing Jiang54679482021-10-04 23:34:53 +08006943 md_new_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006944
6945 return 0;
6946busy:
NeilBrown9d487392016-11-02 14:16:49 +11006947 pr_debug("md: cannot remove active disk %s from %s ...\n",
6948 bdevname(rdev->bdev,b), mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006949 return -EBUSY;
6950}
6951
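/*
 * HOT_ADD_DISK ioctl: import a bare device and bind it to a running
 * 0.90-superblock array as a spare, then kick recovery.
 */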
NeilBrownf72ffdd2014-09-30 14:23:59 +10006952static int hot_add_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006953{
6954 char b[BDEVNAME_SIZE];
6955 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11006956 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006957
6958 if (!mddev->pers)
6959 return -ENODEV;
6960
6961 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006962 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006963 mdname(mddev));
6964 return -EINVAL;
6965 }
6966 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006967 pr_warn("%s: personality does not support diskops!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006968 mdname(mddev));
6969 return -EINVAL;
6970 }
6971
NeilBrownd710e132008-10-13 11:55:12 +11006972 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006973 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006974 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006975 PTR_ERR(rdev));
6976 return -EINVAL;
6977 }
6978
6979 if (mddev->persistent)
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006980 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006981 else
Christoph Hellwig0fe80342021-10-18 12:11:06 +02006982 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006983
NeilBrown8190e752009-06-18 08:48:58 +10006984 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006985
NeilBrownb2d444d2005-11-08 21:39:31 -08006986 if (test_bit(Faulty, &rdev->flags)) {
NeilBrown9d487392016-11-02 14:16:49 +11006987 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006988 bdevname(rdev->bdev,b), mdname(mddev));
6989 err = -EINVAL;
6990 goto abort_export;
6991 }
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006992
NeilBrownb2d444d2005-11-08 21:39:31 -08006993 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994 rdev->desc_nr = -1;
NeilBrown58427302006-10-06 00:44:04 -07006995 rdev->saved_raid_disk = -1;
NeilBrown2bf071b2006-01-06 00:20:55 -08006996 err = bind_rdev_to_array(rdev, mddev);
6997 if (err)
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05006998 goto abort_export;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006999
7000 /*
 7001 * The rest had better be atomic; we can have disk failures
7002 * noticed in interrupt contexts ...
7003 */
7004
Linus Torvalds1da177e2005-04-16 15:20:36 -07007005 rdev->raid_disk = -1;
7006
Shaohua Li29530792016-12-08 15:48:19 -08007007 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11007008 if (!mddev->thread)
7009 md_update_sb(mddev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007010 /*
Vishal Vermaf51d46d2021-12-21 20:06:19 +00007011 * If the new disk does not support REQ_NOWAIT,
 7012 * disable REQ_NOWAIT support on the whole MD device.
7013 */
7014 if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
7015 pr_info("%s: Disabling nowait because %s does not support nowait\n",
7016 mdname(mddev), bdevname(rdev->bdev, b));
7017 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
7018 }
7019 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020 * Kick recovery, maybe this spare has to be added to the
7021 * array immediately.
7022 */
7023 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7024 md_wakeup_thread(mddev->thread);
Guoqing Jiang54679482021-10-04 23:34:53 +08007025 md_new_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007026 return 0;
7027
Linus Torvalds1da177e2005-04-16 15:20:36 -07007028abort_export:
7029 export_rdev(rdev);
7030 return err;
7031}
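/*
 * Editor's illustration (not part of the driver): a minimal userspace
 * sketch of driving the two hot-plug ioctls above. It assumes an
 * already-running /dev/md0 with a version-0.90 superblock and members
 * whose major:minor fit the old 16-bit dev_t encoding that
 * new_decode_dev() accepts; error handling is mostly omitted.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <linux/raid/md_u.h>

static int hot_swap_example(void)
{
	int md = open("/dev/md0", O_RDWR);	/* array to manipulate */

	if (md < 0)
		return -1;
	/* remove a member that has already been failed/is inactive ... */
	ioctl(md, HOT_REMOVE_DISK, makedev(8, 0));	/* e.g. /dev/sda */
	/* ... then push in a spare */
	return ioctl(md, HOT_ADD_DISK, makedev(8, 16));	/* e.g. /dev/sdb */
}
#endif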
7032
NeilBrownfd01b882011-10-11 16:47:53 +11007033static int set_bitmap_file(struct mddev *mddev, int fd)
NeilBrown32a76272005-06-21 17:17:14 -07007034{
NeilBrown035328c2014-04-09 12:25:40 +10007035 int err = 0;
NeilBrown32a76272005-06-21 17:17:14 -07007036
NeilBrown36fa3062005-09-09 16:23:45 -07007037 if (mddev->pers) {
NeilBrownd66b1b32014-08-08 15:40:24 +10007038 if (!mddev->pers->quiesce || !mddev->thread)
NeilBrown36fa3062005-09-09 16:23:45 -07007039 return -EBUSY;
7040 if (mddev->recovery || mddev->sync_thread)
7041 return -EBUSY;
7042 /* we should be able to change the bitmap.. */
NeilBrown32a76272005-06-21 17:17:14 -07007043 }
7044
NeilBrown36fa3062005-09-09 16:23:45 -07007045 if (fd >= 0) {
NeilBrown035328c2014-04-09 12:25:40 +10007046 struct inode *inode;
NeilBrown1e594bb2014-12-15 12:57:00 +11007047 struct file *f;
NeilBrown36fa3062005-09-09 16:23:45 -07007048
NeilBrown1e594bb2014-12-15 12:57:00 +11007049 if (mddev->bitmap || mddev->bitmap_info.file)
7050 return -EEXIST; /* cannot add when bitmap is present */
7051 f = fget(fd);
7052
7053 if (f == NULL) {
NeilBrown9d487392016-11-02 14:16:49 +11007054 pr_warn("%s: error: failed to get bitmap file\n",
7055 mdname(mddev));
NeilBrown36fa3062005-09-09 16:23:45 -07007056 return -EBADF;
7057 }
7058
NeilBrown1e594bb2014-12-15 12:57:00 +11007059 inode = f->f_mapping->host;
NeilBrown035328c2014-04-09 12:25:40 +10007060 if (!S_ISREG(inode->i_mode)) {
NeilBrown9d487392016-11-02 14:16:49 +11007061 pr_warn("%s: error: bitmap file must be a regular file\n",
7062 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007063 err = -EBADF;
NeilBrown1e594bb2014-12-15 12:57:00 +11007064 } else if (!(f->f_mode & FMODE_WRITE)) {
NeilBrown9d487392016-11-02 14:16:49 +11007065 pr_warn("%s: error: bitmap file must be opened for write\n",
7066 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007067 err = -EBADF;
7068 } else if (atomic_read(&inode->i_writecount) != 1) {
NeilBrown9d487392016-11-02 14:16:49 +11007069 pr_warn("%s: error: bitmap file is already in use\n",
7070 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007071 err = -EBUSY;
7072 }
7073 if (err) {
NeilBrown1e594bb2014-12-15 12:57:00 +11007074 fput(f);
NeilBrown36fa3062005-09-09 16:23:45 -07007075 return err;
7076 }
NeilBrown1e594bb2014-12-15 12:57:00 +11007077 mddev->bitmap_info.file = f;
NeilBrownc3d97142009-12-14 12:49:52 +11007078 mddev->bitmap_info.offset = 0; /* file overrides offset */
NeilBrown36fa3062005-09-09 16:23:45 -07007079 } else if (mddev->bitmap == NULL)
7080 return -ENOENT; /* cannot remove what isn't there */
7081 err = 0;
7082 if (mddev->pers) {
NeilBrown69e51b42010-06-01 19:37:35 +10007083 if (fd >= 0) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007084 struct bitmap *bitmap;
7085
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007086 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007087 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007088 if (!IS_ERR(bitmap)) {
7089 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007090 err = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007091 } else
7092 err = PTR_ERR(bitmap);
NeilBrown52a0d492017-10-17 13:46:43 +11007093 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007094 md_bitmap_destroy(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007095 fd = -1;
7096 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007097 mddev_resume(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007098 } else if (fd < 0) {
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007099 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007100 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007101 mddev_resume(mddev);
NeilBrownd7375ab2006-06-26 00:27:43 -07007102 }
NeilBrownd7375ab2006-06-26 00:27:43 -07007103 }
7104 if (fd < 0) {
NeilBrown4af1a042014-12-15 12:57:00 +11007105 struct file *f = mddev->bitmap_info.file;
7106 if (f) {
7107 spin_lock(&mddev->lock);
7108 mddev->bitmap_info.file = NULL;
7109 spin_unlock(&mddev->lock);
7110 fput(f);
7111 }
NeilBrown36fa3062005-09-09 16:23:45 -07007112 }
7113
NeilBrown32a76272005-06-21 17:17:14 -07007114 return err;
7115}
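/*
 * Editor's illustration (not part of the driver): how userspace would
 * exercise set_bitmap_file() above through the SET_BITMAP_FILE ioctl.
 * The checks above require a regular file, opened for write, with no
 * other writers; the path used here is purely illustrative.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int attach_external_bitmap(int md_fd)
{
	int bm = open("/var/lib/md0-bitmap", O_RDWR);	/* pre-created file */

	if (bm < 0)
		return -1;
	/* fd >= 0 attaches the file as the array's bitmap */
	return ioctl(md_fd, SET_BITMAP_FILE, bm);
	/* later, ioctl(md_fd, SET_BITMAP_FILE, -1) would detach it again */
}
#endif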
7116
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117/*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007118 * md_set_array_info is used in two different ways.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007119 * The original usage is when creating a new array.
 7120 * In this usage, raid_disks is > 0 and, together with
 7121 * level, size, not_persistent, layout and chunksize, it determines the
 7122 * shape of the array.
 7123 * This will always create an array with a type-0.90.0 superblock.
 7124 * The newer usage is when assembling an array.
 7125 * In this case raid_disks will be 0, and the major_version field is
 7126 * used to determine which style super-blocks are to be found on the devices.
 7127 * The minor and patch _version numbers are also kept in case the
7128 * super_block handler wishes to interpret them.
7129 */
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007130int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007131{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007132 if (info->raid_disks == 0) {
7133 /* just setting version number for superblock loading */
7134 if (info->major_version < 0 ||
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07007135 info->major_version >= ARRAY_SIZE(super_types) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07007136 super_types[info->major_version].name == NULL) {
7137 /* maybe try to auto-load a module? */
NeilBrown9d487392016-11-02 14:16:49 +11007138 pr_warn("md: superblock version %d not known\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007139 info->major_version);
7140 return -EINVAL;
7141 }
7142 mddev->major_version = info->major_version;
7143 mddev->minor_version = info->minor_version;
7144 mddev->patch_version = info->patch_version;
NeilBrown3f9d7b02006-12-22 01:11:41 -08007145 mddev->persistent = !info->not_persistent;
NeilBrowncbd19982009-12-30 12:08:49 +11007146 /* ensure mddev_put doesn't delete this now that there
7147 * is some minimal configuration.
7148 */
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007149 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150 return 0;
7151 }
7152 mddev->major_version = MD_MAJOR_VERSION;
7153 mddev->minor_version = MD_MINOR_VERSION;
7154 mddev->patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007155 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156
7157 mddev->level = info->level;
NeilBrown17115e02006-01-16 22:14:57 -08007158 mddev->clevel[0] = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11007159 mddev->dev_sectors = 2 * (sector_t)info->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160 mddev->raid_disks = info->raid_disks;
7161 /* don't set md_minor, it is determined by which /dev/md* was
 7162 * opened
7163 */
7164 if (info->state & (1<<MD_SB_CLEAN))
7165 mddev->recovery_cp = MaxSector;
7166 else
7167 mddev->recovery_cp = 0;
7168 mddev->persistent = ! info->not_persistent;
NeilBrowne6910632008-02-06 01:39:51 -08007169 mddev->external = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170
7171 mddev->layout = info->layout;
NeilBrown33f2c352019-09-09 16:52:29 +10007172 if (mddev->level == 0)
7173 /* Cannot trust RAID0 layout info here */
7174 mddev->layout = -1;
Andre Noll9d8f0362009-06-18 08:45:01 +10007175 mddev->chunk_sectors = info->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176
Shaohua Li29530792016-12-08 15:48:19 -08007177 if (mddev->persistent) {
NeilBrown1b3bae42017-03-01 07:31:28 +11007178 mddev->max_disks = MD_SB_DISKS;
7179 mddev->flags = 0;
7180 mddev->sb_flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08007181 }
7182 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007183
NeilBrownc3d97142009-12-14 12:49:52 +11007184 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10007185 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrownc3d97142009-12-14 12:49:52 +11007186 mddev->bitmap_info.offset = 0;
NeilBrownb2a27032005-11-28 13:44:12 -08007187
NeilBrownf6705572006-03-27 01:18:11 -08007188 mddev->reshape_position = MaxSector;
7189
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190 /*
7191 * Generate a 128 bit UUID
7192 */
7193 get_random_bytes(mddev->uuid, 16);
7194
NeilBrownf6705572006-03-27 01:18:11 -08007195 mddev->new_level = mddev->level;
Andre Noll664e7c42009-06-18 08:45:27 +10007196 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08007197 mddev->new_layout = mddev->layout;
7198 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007199 mddev->reshape_backwards = 0;
NeilBrownf6705572006-03-27 01:18:11 -08007200
Linus Torvalds1da177e2005-04-16 15:20:36 -07007201 return 0;
7202}
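/*
 * Editor's illustration (not part of the driver): the two call styles
 * described above, as a userspace SET_ARRAY_INFO sketch. The field
 * values (RAID5, 3 disks, 64 KiB chunks, 1 GiB per device) are examples
 * only.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static void set_array_info_examples(int md_fd)
{
	mdu_array_info_t info;

	/* Style 1: create a new array (always gets a 0.90.0 superblock) */
	memset(&info, 0, sizeof(info));
	info.level = 5;			/* RAID5 */
	info.raid_disks = 3;		/* > 0 selects the "create" path */
	info.chunk_size = 64 * 1024;	/* bytes; chunk_sectors = this >> 9 */
	info.size = 1048576;		/* KiB per device; dev_sectors = 2 * size */
	ioctl(md_fd, SET_ARRAY_INFO, &info);

	/* Style 2: prepare for assembly; only the version numbers matter */
	memset(&info, 0, sizeof(info));
	info.raid_disks = 0;		/* 0 selects the "assemble" path */
	info.major_version = 1;		/* which super_types[] entry to use */
	ioctl(md_fd, SET_ARRAY_INFO, &info);
}
#endif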
7203
NeilBrownfd01b882011-10-11 16:47:53 +11007204void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
Dan Williams1f403622009-03-31 14:59:03 +11007205{
Shaohua Liefa4b772017-10-18 22:08:13 -07007206 lockdep_assert_held(&mddev->reconfig_mutex);
Dan Williamsb522adc2009-03-31 15:00:31 +11007207
7208 if (mddev->external_size)
7209 return;
7210
Dan Williams1f403622009-03-31 14:59:03 +11007211 mddev->array_sectors = array_sectors;
7212}
7213EXPORT_SYMBOL(md_set_array_sectors);
7214
NeilBrownfd01b882011-10-11 16:47:53 +11007215static int update_size(struct mddev *mddev, sector_t num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007216{
NeilBrown3cb03002011-10-11 16:45:26 +11007217 struct md_rdev *rdev;
NeilBrowna35b0d62006-01-06 00:20:49 -08007218 int rv;
Andre Nolld71f9f82008-07-11 22:02:22 +10007219 int fit = (num_sectors == 0);
Guoqing Jiang818da592017-03-01 16:42:40 +08007220 sector_t old_dev_sectors = mddev->dev_sectors;
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04007221
NeilBrowna35b0d62006-01-06 00:20:49 -08007222 if (mddev->pers->resize == NULL)
7223 return -EINVAL;
Andre Nolld71f9f82008-07-11 22:02:22 +10007224 /* The "num_sectors" is the number of sectors of each device that
7225 * is used. This can only make sense for arrays with redundancy.
7226 * linear and raid0 always use whatever space is available. We can only
7227 * consider changing this number if no resync or reconstruction is
7228 * happening, and if the new size is acceptable. It must fit before the
Andre Noll0f420352008-07-11 22:02:23 +10007229 * sb_start or, if that is <data_offset, it must fit before the size
Andre Nolld71f9f82008-07-11 22:02:22 +10007230 * of each device. If num_sectors is zero, we find the largest size
7231 * that fits.
NeilBrowna35b0d62006-01-06 00:20:49 -08007232 */
NeilBrownf851b602014-12-11 10:02:10 +11007233 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7234 mddev->sync_thread)
NeilBrowna35b0d62006-01-06 00:20:49 -08007235 return -EBUSY;
NeilBrownbd8839e2014-05-28 13:39:21 +10007236 if (mddev->ro)
7237 return -EROFS;
NeilBrowna4a61252012-05-22 13:55:27 +10007238
NeilBrowndafb20f2012-03-19 12:46:39 +11007239 rdev_for_each(rdev, mddev) {
Andre Nolldd8ac332009-03-31 14:33:13 +11007240 sector_t avail = rdev->sectors;
NeilBrown01ab5662006-10-28 10:38:30 -07007241
Andre Nolld71f9f82008-07-11 22:02:22 +10007242 if (fit && (num_sectors == 0 || num_sectors > avail))
7243 num_sectors = avail;
7244 if (avail < num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007245 return -ENOSPC;
7246 }
Andre Nolld71f9f82008-07-11 22:02:22 +10007247 rv = mddev->pers->resize(mddev, num_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007248 if (!rv) {
Guoqing Jiang818da592017-03-01 16:42:40 +08007249 if (mddev_is_clustered(mddev))
7250 md_cluster_ops->update_size(mddev, old_dev_sectors);
7251 else if (mddev->queue) {
Christoph Hellwig2c247c52020-11-16 15:57:11 +01007252 set_capacity_and_notify(mddev->gendisk,
7253 mddev->array_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007254 }
7255 }
NeilBrowna35b0d62006-01-06 00:20:49 -08007256 return rv;
7257}
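/*
 * Editor's illustration (not part of the driver): the sizing rule in
 * update_size() above, restated as a standalone sketch. With
 * num_sectors == 0 ("fit"), the result shrinks to the smallest member;
 * otherwise every member must offer at least num_sectors or the request
 * fails, mirroring the -ENOSPC case.
 */
#if 0	/* standalone example, kept out of the kernel build */
#include <stdio.h>

static long long pick_dev_sectors(const long long *avail, int n,
				  long long num_sectors)
{
	int fit = (num_sectors == 0);
	int i;

	for (i = 0; i < n; i++) {
		if (fit && (num_sectors == 0 || num_sectors > avail[i]))
			num_sectors = avail[i];	/* shrink to smallest member */
		if (avail[i] < num_sectors)
			return -1;		/* -ENOSPC in the driver */
	}
	return num_sectors;
}

int main(void)
{
	long long avail[] = { 1953525168LL, 976773168LL };	/* ~1TB, ~500GB */

	/* "fit" request (0): ends up at the ~500GB member's sector count */
	printf("%lld\n", pick_dev_sectors(avail, 2, 0));
	return 0;
}
#endif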
7258
NeilBrownfd01b882011-10-11 16:47:53 +11007259static int update_raid_disks(struct mddev *mddev, int raid_disks)
NeilBrownda943b992006-01-06 00:20:54 -08007260{
7261 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10007262 struct md_rdev *rdev;
NeilBrownda943b992006-01-06 00:20:54 -08007263 /* change the number of raid disks */
NeilBrown63c70c42006-03-27 01:18:13 -08007264 if (mddev->pers->check_reshape == NULL)
NeilBrownda943b992006-01-06 00:20:54 -08007265 return -EINVAL;
NeilBrownbd8839e2014-05-28 13:39:21 +10007266 if (mddev->ro)
7267 return -EROFS;
NeilBrownda943b992006-01-06 00:20:54 -08007268 if (raid_disks <= 0 ||
NeilBrown233fca32010-04-14 17:02:09 +10007269 (mddev->max_disks && raid_disks >= mddev->max_disks))
NeilBrownda943b992006-01-06 00:20:54 -08007270 return -EINVAL;
NeilBrownf851b602014-12-11 10:02:10 +11007271 if (mddev->sync_thread ||
7272 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
Zhao Heminga8da01f2020-11-19 19:41:33 +08007273 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
NeilBrownf851b602014-12-11 10:02:10 +11007274 mddev->reshape_position != MaxSector)
NeilBrownda943b992006-01-06 00:20:54 -08007275 return -EBUSY;
NeilBrownc6563a82012-05-21 09:27:00 +10007276
7277 rdev_for_each(rdev, mddev) {
7278 if (mddev->raid_disks < raid_disks &&
7279 rdev->data_offset < rdev->new_data_offset)
7280 return -EINVAL;
7281 if (mddev->raid_disks > raid_disks &&
7282 rdev->data_offset > rdev->new_data_offset)
7283 return -EINVAL;
7284 }
7285
NeilBrown63c70c42006-03-27 01:18:13 -08007286 mddev->delta_disks = raid_disks - mddev->raid_disks;
NeilBrown2c810cd2012-05-21 09:27:00 +10007287 if (mddev->delta_disks < 0)
7288 mddev->reshape_backwards = 1;
7289 else if (mddev->delta_disks > 0)
7290 mddev->reshape_backwards = 0;
NeilBrown63c70c42006-03-27 01:18:13 -08007291
7292 rv = mddev->pers->check_reshape(mddev);
NeilBrown2c810cd2012-05-21 09:27:00 +10007293 if (rv < 0) {
NeilBrownde171cb2011-01-31 11:57:42 +11007294 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007295 mddev->reshape_backwards = 0;
7296 }
NeilBrownda943b992006-01-06 00:20:54 -08007297 return rv;
7298}
7299
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300/*
7301 * update_array_info is used to change the configuration of an
7302 * on-line array.
 7303 * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
7304 * fields in the info are checked against the array.
7305 * Any differences that cannot be handled will cause an error.
7306 * Normally, only one change can be managed at a time.
7307 */
NeilBrownfd01b882011-10-11 16:47:53 +11007308static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007309{
7310 int rv = 0;
7311 int cnt = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007312 int state = 0;
7313
 7314 /* calculate expected state, ignoring low bits */
NeilBrownc3d97142009-12-14 12:49:52 +11007315 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown36fa3062005-09-09 16:23:45 -07007316 state |= (1 << MD_SB_BITMAP_PRESENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007317
7318 if (mddev->major_version != info->major_version ||
7319 mddev->minor_version != info->minor_version ||
7320/* mddev->patch_version != info->patch_version || */
7321 mddev->ctime != info->ctime ||
7322 mddev->level != info->level ||
7323/* mddev->layout != info->layout || */
Firo Yang4e023612015-06-11 09:41:10 +08007324 mddev->persistent != !info->not_persistent ||
Andre Noll9d8f0362009-06-18 08:45:01 +10007325 mddev->chunk_sectors != info->chunk_size >> 9 ||
NeilBrown36fa3062005-09-09 16:23:45 -07007326 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7327 ((state^info->state) & 0xfffffe00)
7328 )
Linus Torvalds1da177e2005-04-16 15:20:36 -07007329 return -EINVAL;
7330 /* Check there is only one change */
Andre Noll58c0fed2009-03-31 14:33:13 +11007331 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7332 cnt++;
7333 if (mddev->raid_disks != info->raid_disks)
7334 cnt++;
7335 if (mddev->layout != info->layout)
7336 cnt++;
7337 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7338 cnt++;
7339 if (cnt == 0)
7340 return 0;
7341 if (cnt > 1)
7342 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007343
7344 if (mddev->layout != info->layout) {
7345 /* Change layout
7346 * we don't need to do anything at the md level, the
7347 * personality will take care of it all.
7348 */
NeilBrown50ac1682009-06-18 08:47:55 +10007349 if (mddev->pers->check_reshape == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007350 return -EINVAL;
NeilBrown597a7112009-06-18 08:47:42 +10007351 else {
7352 mddev->new_layout = info->layout;
NeilBrown50ac1682009-06-18 08:47:55 +10007353 rv = mddev->pers->check_reshape(mddev);
NeilBrown597a7112009-06-18 08:47:42 +10007354 if (rv)
7355 mddev->new_layout = mddev->layout;
7356 return rv;
7357 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007358 }
Andre Noll58c0fed2009-03-31 14:33:13 +11007359 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
Andre Nolld71f9f82008-07-11 22:02:22 +10007360 rv = update_size(mddev, (sector_t)info->size * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007361
NeilBrownda943b992006-01-06 00:20:54 -08007362 if (mddev->raid_disks != info->raid_disks)
7363 rv = update_raid_disks(mddev, info->raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007364
NeilBrown36fa3062005-09-09 16:23:45 -07007365 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007366 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7367 rv = -EINVAL;
7368 goto err;
7369 }
7370 if (mddev->recovery || mddev->sync_thread) {
7371 rv = -EBUSY;
7372 goto err;
7373 }
NeilBrown36fa3062005-09-09 16:23:45 -07007374 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007375 struct bitmap *bitmap;
NeilBrown36fa3062005-09-09 16:23:45 -07007376 /* add the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007377 if (mddev->bitmap) {
7378 rv = -EEXIST;
7379 goto err;
7380 }
7381 if (mddev->bitmap_info.default_offset == 0) {
7382 rv = -EINVAL;
7383 goto err;
7384 }
NeilBrownc3d97142009-12-14 12:49:52 +11007385 mddev->bitmap_info.offset =
7386 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10007387 mddev->bitmap_info.space =
7388 mddev->bitmap_info.default_space;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007389 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007390 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007391 if (!IS_ERR(bitmap)) {
7392 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007393 rv = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007394 } else
7395 rv = PTR_ERR(bitmap);
NeilBrown36fa3062005-09-09 16:23:45 -07007396 if (rv)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007397 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007398 mddev_resume(mddev);
NeilBrown36fa3062005-09-09 16:23:45 -07007399 } else {
7400 /* remove the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007401 if (!mddev->bitmap) {
7402 rv = -ENOENT;
7403 goto err;
7404 }
7405 if (mddev->bitmap->storage.file) {
7406 rv = -EINVAL;
7407 goto err;
7408 }
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007409 if (mddev->bitmap_info.nodes) {
7410 /* hold PW on all the bitmap lock */
7411 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11007412 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007413 rv = -EPERM;
7414 md_cluster_ops->unlock_all_bitmaps(mddev);
7415 goto err;
7416 }
7417
7418 mddev->bitmap_info.nodes = 0;
7419 md_cluster_ops->leave(mddev);
Zhao Hemingedee9df2020-07-21 02:08:53 +08007420 module_put(md_cluster_mod);
Zhao Heming7c9d5c52020-07-21 02:08:52 +08007421 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007422 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007423 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007424 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007425 mddev_resume(mddev);
NeilBrownc3d97142009-12-14 12:49:52 +11007426 mddev->bitmap_info.offset = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007427 }
7428 }
NeilBrown850b2b422006-10-03 01:15:46 -07007429 md_update_sb(mddev, 1);
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007430 return rv;
7431err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007432 return rv;
7433}
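/*
 * Editor's illustration (not part of the driver): the "one change per
 * call" rule described above, as a userspace sketch. The current
 * configuration is read back with GET_ARRAY_INFO and exactly one field
 * is altered before SET_ARRAY_INFO resubmits it; touching two fields at
 * once would earn -EINVAL from the cnt check above.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>

static int grow_by_one_disk(int md_fd)
{
	mdu_array_info_t info;

	if (ioctl(md_fd, GET_ARRAY_INFO, &info) < 0)
		return -1;
	info.raid_disks += 1;		/* the single permitted change */
	return ioctl(md_fd, SET_ARRAY_INFO, &info);
}
#endif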
7434
NeilBrownfd01b882011-10-11 16:47:53 +11007435static int set_disk_faulty(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007436{
NeilBrown3cb03002011-10-11 16:45:26 +11007437 struct md_rdev *rdev;
NeilBrown1ca69c42012-10-11 13:37:33 +11007438 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007439
7440 if (mddev->pers == NULL)
7441 return -ENODEV;
7442
NeilBrown1ca69c42012-10-11 13:37:33 +11007443 rcu_read_lock();
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +01007444 rdev = md_find_rdev_rcu(mddev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007445 if (!rdev)
NeilBrown1ca69c42012-10-11 13:37:33 +11007446 err = -ENODEV;
7447 else {
7448 md_error(mddev, rdev);
7449 if (!test_bit(Faulty, &rdev->flags))
7450 err = -EBUSY;
7451 }
7452 rcu_read_unlock();
7453 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007454}
7455
Andre Noll2f9618c2008-04-25 18:57:58 +02007456/*
 7457 * We have a problem here: there is no easy way to give a CHS
 7458 * virtual geometry. We currently pretend that we have a 2-head,
 7459 * 4-sector geometry (with a BIG number of cylinders...). This drives
7460 * dosfs just mad... ;-)
7461 */
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007462static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7463{
NeilBrownfd01b882011-10-11 16:47:53 +11007464 struct mddev *mddev = bdev->bd_disk->private_data;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007465
7466 geo->heads = 2;
7467 geo->sectors = 4;
NeilBrown49ce6ce2010-03-29 10:51:42 +11007468 geo->cylinders = mddev->array_sectors / 8;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007469 return 0;
7470}
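/*
 * Editor's note: worked example of the fake geometry above. For a 1 TiB
 * array (array_sectors = 2147483648 512-byte sectors) the reported
 * geometry is heads = 2, sectors = 4, cylinders = 2147483648 / 8 =
 * 268435456, so heads * sectors * cylinders still multiplies back to
 * the real capacity.
 */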
7471
Nicolas Schichancb335f82014-01-15 16:58:52 +01007472static inline bool md_ioctl_valid(unsigned int cmd)
7473{
7474 switch (cmd) {
7475 case ADD_NEW_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007476 case GET_ARRAY_INFO:
7477 case GET_BITMAP_FILE:
7478 case GET_DISK_INFO:
7479 case HOT_ADD_DISK:
7480 case HOT_REMOVE_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007481 case RAID_VERSION:
7482 case RESTART_ARRAY_RW:
7483 case RUN_ARRAY:
7484 case SET_ARRAY_INFO:
7485 case SET_BITMAP_FILE:
7486 case SET_DISK_FAULTY:
7487 case STOP_ARRAY:
7488 case STOP_ARRAY_RO:
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007489 case CLUSTERED_DISK_NACK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007490 return true;
7491 default:
7492 return false;
7493 }
7494}
7495
Al Viroa39907f2008-03-02 10:31:15 -05007496static int md_ioctl(struct block_device *bdev, fmode_t mode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007497 unsigned int cmd, unsigned long arg)
7498{
7499 int err = 0;
7500 void __user *argp = (void __user *)arg;
NeilBrownfd01b882011-10-11 16:47:53 +11007501 struct mddev *mddev = NULL;
NeilBrown065e5192017-04-06 11:16:33 +08007502 bool did_set_md_closing = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503
Nicolas Schichancb335f82014-01-15 16:58:52 +01007504 if (!md_ioctl_valid(cmd))
7505 return -ENOTTY;
7506
NeilBrown506c9e42011-12-23 10:17:26 +11007507 switch (cmd) {
7508 case RAID_VERSION:
7509 case GET_ARRAY_INFO:
7510 case GET_DISK_INFO:
7511 break;
7512 default:
7513 if (!capable(CAP_SYS_ADMIN))
7514 return -EACCES;
7515 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007516
7517 /*
7518 * Commands dealing with the RAID driver but not any
7519 * particular array:
7520 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007521 switch (cmd) {
7522 case RAID_VERSION:
7523 err = get_version(argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007524 goto out;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007525 default:;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007526 }
7527
7528 /*
7529 * Commands creating/starting a new array:
7530 */
7531
Al Viroa39907f2008-03-02 10:31:15 -05007532 mddev = bdev->bd_disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007533
7534 if (!mddev) {
7535 BUG();
NeilBrown3adc28d2014-09-30 15:46:41 +10007536 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007537 }
7538
NeilBrown1ca69c42012-10-11 13:37:33 +11007539 /* Some actions do not require the mutex */
7540 switch (cmd) {
7541 case GET_ARRAY_INFO:
7542 if (!mddev->raid_disks && !mddev->external)
7543 err = -ENODEV;
7544 else
7545 err = get_array_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007546 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007547
7548 case GET_DISK_INFO:
7549 if (!mddev->raid_disks && !mddev->external)
7550 err = -ENODEV;
7551 else
7552 err = get_disk_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007553 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007554
7555 case SET_DISK_FAULTY:
7556 err = set_disk_faulty(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007557 goto out;
NeilBrown4af1a042014-12-15 12:57:00 +11007558
7559 case GET_BITMAP_FILE:
7560 err = get_bitmap_file(mddev, argp);
7561 goto out;
7562
NeilBrown1ca69c42012-10-11 13:37:33 +11007563 }
7564
Guoqing Jiang78b990c2020-04-04 23:57:10 +02007565 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02007566 flush_rdev_wq(mddev);
NeilBrowna7a3f082012-12-11 13:35:54 +11007567
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007568 if (cmd == HOT_REMOVE_DISK)
7569 /* need to ensure recovery thread has run */
7570 wait_event_interruptible_timeout(mddev->sb_wait,
7571 !test_bit(MD_RECOVERY_NEEDED,
Shaohua Li82a301c2016-12-08 15:48:18 -08007572 &mddev->recovery),
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007573 msecs_to_jiffies(5000));
NeilBrown260fa032013-08-27 16:44:13 +10007574 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7575 /* Need to flush page cache, and ensure no-one else opens
7576 * and writes
7577 */
7578 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10007579 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
NeilBrown260fa032013-08-27 16:44:13 +10007580 mutex_unlock(&mddev->open_mutex);
7581 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007582 goto out;
NeilBrown260fa032013-08-27 16:44:13 +10007583 }
Dae R. Jeongc731b842020-10-22 10:21:28 +09007584 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7585 mutex_unlock(&mddev->open_mutex);
7586 err = -EBUSY;
7587 goto out;
7588 }
NeilBrown065e5192017-04-06 11:16:33 +08007589 did_set_md_closing = true;
NeilBrown260fa032013-08-27 16:44:13 +10007590 mutex_unlock(&mddev->open_mutex);
7591 sync_blockdev(bdev);
7592 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 err = mddev_lock(mddev);
7594 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007595 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7596 err, cmd);
NeilBrown3adc28d2014-09-30 15:46:41 +10007597 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007598 }
7599
NeilBrownc02c0ae2012-12-11 13:39:21 +11007600 if (cmd == SET_ARRAY_INFO) {
7601 mdu_array_info_t info;
7602 if (!arg)
7603 memset(&info, 0, sizeof(info));
7604 else if (copy_from_user(&info, argp, sizeof(info))) {
7605 err = -EFAULT;
NeilBrown3adc28d2014-09-30 15:46:41 +10007606 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007607 }
7608 if (mddev->pers) {
7609 err = update_array_info(mddev, &info);
7610 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007611 pr_warn("md: couldn't update array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007612 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007613 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007614 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007615 }
7616 if (!list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11007617 pr_warn("md: array %s already has disks!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007618 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007619 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007620 }
7621 if (mddev->raid_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11007622 pr_warn("md: array %s already initialised!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007623 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007624 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007625 }
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007626 err = md_set_array_info(mddev, &info);
NeilBrownc02c0ae2012-12-11 13:39:21 +11007627 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007628 pr_warn("md: couldn't set array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007629 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007630 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007631 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007632 }
7633
7634 /*
7635 * Commands querying/configuring an existing array:
7636 */
NeilBrown32a76272005-06-21 17:17:14 -07007637 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
NeilBrown3f9d7b02006-12-22 01:11:41 -08007638 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
NeilBrowna17184a2008-02-06 01:39:55 -08007639 if ((!mddev->raid_disks && !mddev->external)
7640 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7641 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7642 && cmd != GET_BITMAP_FILE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007643 err = -ENODEV;
NeilBrown3adc28d2014-09-30 15:46:41 +10007644 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007645 }
7646
7647 /*
7648 * Commands even a read-only array can execute:
7649 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007650 switch (cmd) {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007651 case RESTART_ARRAY_RW:
7652 err = restart_array(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007653 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007654
7655 case STOP_ARRAY:
7656 err = do_md_stop(mddev, 0, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007657 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007658
7659 case STOP_ARRAY_RO:
7660 err = md_set_readonly(mddev, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007661 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007662
NeilBrown3ea8929d2013-04-24 11:42:41 +10007663 case HOT_REMOVE_DISK:
7664 err = hot_remove_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007665 goto unlock;
NeilBrown3ea8929d2013-04-24 11:42:41 +10007666
NeilBrown7ceb17e2013-04-24 11:42:42 +10007667 case ADD_NEW_DISK:
7668 /* We can support ADD_NEW_DISK on read-only arrays
Wei Fang466ad292016-03-21 19:19:30 +08007669 * only if we are re-adding a preexisting device.
NeilBrown7ceb17e2013-04-24 11:42:42 +10007670 * So require mddev->pers and MD_DISK_SYNC.
7671 */
7672 if (mddev->pers) {
7673 mdu_disk_info_t info;
7674 if (copy_from_user(&info, argp, sizeof(info)))
7675 err = -EFAULT;
7676 else if (!(info.state & (1<<MD_DISK_SYNC)))
7677 /* Need to clear read-only for this */
7678 break;
7679 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007680 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007681 goto unlock;
NeilBrown7ceb17e2013-04-24 11:42:42 +10007682 }
7683 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007684 }
7685
7686 /*
7687 * The remaining ioctls are changing the state of the
NeilBrownf91de922005-11-08 21:39:36 -08007688 * superblock, so we do not allow them on read-only arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007689 */
NeilBrown326eb172014-09-30 15:36:28 +10007690 if (mddev->ro && mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08007691 if (mddev->ro == 2) {
7692 mddev->ro = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10007693 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10007694 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf3378b42013-02-28 11:59:03 +11007695 /* mddev_unlock will wake thread */
7696 /* If a device failed while we were read-only, we
7697 * need to make sure the metadata is updated now.
7698 */
Shaohua Li29530792016-12-08 15:48:19 -08007699 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
NeilBrownf3378b42013-02-28 11:59:03 +11007700 mddev_unlock(mddev);
7701 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08007702 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7703 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown29f097c2013-11-14 17:54:51 +11007704 mddev_lock_nointr(mddev);
NeilBrownf3378b42013-02-28 11:59:03 +11007705 }
NeilBrownf91de922005-11-08 21:39:36 -08007706 } else {
7707 err = -EROFS;
NeilBrown3adc28d2014-09-30 15:46:41 +10007708 goto unlock;
NeilBrownf91de922005-11-08 21:39:36 -08007709 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007710 }
7711
NeilBrownc02c0ae2012-12-11 13:39:21 +11007712 switch (cmd) {
7713 case ADD_NEW_DISK:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007714 {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007715 mdu_disk_info_t info;
7716 if (copy_from_user(&info, argp, sizeof(info)))
7717 err = -EFAULT;
7718 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007719 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007720 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007722
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007723 case CLUSTERED_DISK_NACK:
7724 if (mddev_is_clustered(mddev))
7725 md_cluster_ops->new_disk_ack(mddev, false);
7726 else
7727 err = -EINVAL;
7728 goto unlock;
7729
NeilBrownc02c0ae2012-12-11 13:39:21 +11007730 case HOT_ADD_DISK:
7731 err = hot_add_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007732 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007733
NeilBrownc02c0ae2012-12-11 13:39:21 +11007734 case RUN_ARRAY:
7735 err = do_md_run(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007736 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007737
NeilBrownc02c0ae2012-12-11 13:39:21 +11007738 case SET_BITMAP_FILE:
7739 err = set_bitmap_file(mddev, (int)arg);
NeilBrown3adc28d2014-09-30 15:46:41 +10007740 goto unlock;
NeilBrown32a76272005-06-21 17:17:14 -07007741
NeilBrownc02c0ae2012-12-11 13:39:21 +11007742 default:
7743 err = -EINVAL;
NeilBrown3adc28d2014-09-30 15:46:41 +10007744 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007745 }
7746
NeilBrown3adc28d2014-09-30 15:46:41 +10007747unlock:
NeilBrownd3374822009-01-09 08:31:10 +11007748 if (mddev->hold_active == UNTIL_IOCTL &&
7749 err != -EINVAL)
7750 mddev->hold_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007751 mddev_unlock(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007752out:
NeilBrown065e5192017-04-06 11:16:33 +08007753 if (did_set_md_closing)
7754 clear_bit(MD_CLOSING, &mddev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007755 return err;
7756}
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007757#ifdef CONFIG_COMPAT
7758static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7759 unsigned int cmd, unsigned long arg)
7760{
7761 switch (cmd) {
7762 case HOT_REMOVE_DISK:
7763 case HOT_ADD_DISK:
7764 case SET_DISK_FAULTY:
7765 case SET_BITMAP_FILE:
7766 /* These take in integer arg, do not convert */
7767 break;
7768 default:
7769 arg = (unsigned long)compat_ptr(arg);
7770 break;
7771 }
7772
7773 return md_ioctl(bdev, mode, cmd, arg);
7774}
7775#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007776
Christoph Hellwig118cf082020-11-03 11:00:13 +01007777static int md_set_read_only(struct block_device *bdev, bool ro)
7778{
7779 struct mddev *mddev = bdev->bd_disk->private_data;
7780 int err;
7781
7782 err = mddev_lock(mddev);
7783 if (err)
7784 return err;
7785
7786 if (!mddev->raid_disks && !mddev->external) {
7787 err = -ENODEV;
7788 goto out_unlock;
7789 }
7790
7791 /*
7792 * Transitioning to read-auto need only happen for arrays that call
7793 * md_write_start and which are not ready for writes yet.
7794 */
7795 if (!ro && mddev->ro == 1 && mddev->pers) {
7796 err = restart_array(mddev);
7797 if (err)
7798 goto out_unlock;
7799 mddev->ro = 2;
7800 }
7801
7802out_unlock:
7803 mddev_unlock(mddev);
7804 return err;
7805}
7806
Al Viroa39907f2008-03-02 10:31:15 -05007807static int md_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007808{
7809 /*
7810 * Succeed if we can lock the mddev, which confirms that
7811 * it isn't being stopped right now.
7812 */
NeilBrownfd01b882011-10-11 16:47:53 +11007813 struct mddev *mddev = mddev_find(bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007814 int err;
7815
Yuanhan Liu0c098222012-05-22 13:55:32 +10007816 if (!mddev)
7817 return -ENODEV;
7818
NeilBrownd3374822009-01-09 08:31:10 +11007819 if (mddev->gendisk != bdev->bd_disk) {
7820 /* we are racing with mddev_put which is discarding this
7821 * bd_disk.
7822 */
7823 mddev_put(mddev);
7824 /* Wait until bdev->bd_disk is definitely gone */
Guoqing Jiangf6766ff2020-04-04 23:57:09 +02007825 if (work_pending(&mddev->del_work))
7826 flush_workqueue(md_misc_wq);
Zhao Heming6a4db2a2021-04-03 11:01:25 +08007827 return -EBUSY;
NeilBrownd3374822009-01-09 08:31:10 +11007828 }
7829 BUG_ON(mddev != bdev->bd_disk->private_data);
7830
NeilBrownc8c00a62009-08-10 12:50:52 +10007831 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007832 goto out;
7833
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007834 if (test_bit(MD_CLOSING, &mddev->flags)) {
7835 mutex_unlock(&mddev->open_mutex);
NeilBrowne2342ca2016-12-05 16:40:50 +11007836 err = -ENODEV;
7837 goto out;
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007838 }
7839
Linus Torvalds1da177e2005-04-16 15:20:36 -07007840 err = 0;
NeilBrownf2ea68c2008-07-21 17:05:25 +10007841 atomic_inc(&mddev->openers);
NeilBrownc8c00a62009-08-10 12:50:52 +10007842 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007843
Christoph Hellwig818077d2020-09-08 16:53:43 +02007844 bdev_check_media_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007845 out:
NeilBrowne2342ca2016-12-05 16:40:50 +11007846 if (err)
7847 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007848 return err;
7849}
7850
Al Virodb2a1442013-05-05 21:52:57 -04007851static void md_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007852{
NeilBrownf72ffdd2014-09-30 14:23:59 +10007853 struct mddev *mddev = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007854
Eric Sesterhenn52e5f9d2006-10-03 23:33:23 +02007855 BUG_ON(!mddev);
NeilBrownf2ea68c2008-07-21 17:05:25 +10007856 atomic_dec(&mddev->openers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007857 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007858}
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007859
Christoph Hellwiga564e232020-07-08 14:25:41 +02007860static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007861{
NeilBrownfd01b882011-10-11 16:47:53 +11007862 struct mddev *mddev = disk->private_data;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007863 unsigned int ret = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007864
Christoph Hellwiga564e232020-07-08 14:25:41 +02007865 if (mddev->changed)
7866 ret = DISK_EVENT_MEDIA_CHANGE;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007867 mddev->changed = 0;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007868 return ret;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007869}
Christoph Hellwiga564e232020-07-08 14:25:41 +02007870
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007871const struct block_device_operations md_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07007872{
7873 .owner = THIS_MODULE,
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02007874 .submit_bio = md_submit_bio,
Al Viroa39907f2008-03-02 10:31:15 -05007875 .open = md_open,
7876 .release = md_release,
NeilBrownb492b852009-05-26 12:57:36 +10007877 .ioctl = md_ioctl,
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007878#ifdef CONFIG_COMPAT
7879 .compat_ioctl = md_compat_ioctl,
7880#endif
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007881 .getgeo = md_getgeo,
Christoph Hellwiga564e232020-07-08 14:25:41 +02007882 .check_events = md_check_events,
Christoph Hellwig118cf082020-11-03 11:00:13 +01007883 .set_read_only = md_set_read_only,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007884};
7885
NeilBrownf72ffdd2014-09-30 14:23:59 +10007886static int md_thread(void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007887{
NeilBrown2b8bf342011-10-11 16:48:23 +11007888 struct md_thread *thread = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007889
Linus Torvalds1da177e2005-04-16 15:20:36 -07007890 /*
7891 * md_thread is a 'system-thread', it's priority should be very
7892 * high. We avoid resource deadlocks individually in each
7893 * raid personality. (RAID5 does preallocation) We also use RR and
7894 * the very same RT priority as kswapd, thus we will never get
7895 * into a priority inversion deadlock.
7896 *
7897 * we definitely have to have equal or higher priority than
7898 * bdflush, otherwise bdflush will deadlock if there are too
7899 * many dirty RAID5 blocks.
7900 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007901
NeilBrown6985c432005-10-19 21:23:47 -07007902 allow_signal(SIGKILL);
NeilBrowna6fb0932005-09-09 16:23:56 -07007903 while (!kthread_should_stop()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007904
NeilBrown93588e22005-11-15 00:09:12 -08007905 /* We need to wait INTERRUPTIBLE so that
7906 * we don't add to the load-average.
7907 * That means we need to be sure no signals are
7908 * pending
7909 */
7910 if (signal_pending(current))
7911 flush_signals(current);
7912
7913 wait_event_interruptible_timeout
7914 (thread->wqueue,
7915 test_bit(THREAD_WAKEUP, &thread->flags)
Shaohua Lice1ccd02016-11-21 10:29:18 -08007916 || kthread_should_stop() || kthread_should_park(),
NeilBrown93588e22005-11-15 00:09:12 -08007917 thread->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007918
NeilBrown6c987912011-01-14 09:13:53 +11007919 clear_bit(THREAD_WAKEUP, &thread->flags);
Shaohua Lice1ccd02016-11-21 10:29:18 -08007920 if (kthread_should_park())
7921 kthread_parkme();
NeilBrown6c987912011-01-14 09:13:53 +11007922 if (!kthread_should_stop())
Shaohua Li4ed87312012-10-11 13:34:00 +11007923 thread->run(thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007924 }
NeilBrowna6fb0932005-09-09 16:23:56 -07007925
Linus Torvalds1da177e2005-04-16 15:20:36 -07007926 return 0;
7927}
7928
NeilBrown2b8bf342011-10-11 16:48:23 +11007929void md_wakeup_thread(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007930{
7931 if (thread) {
NeilBrown36a4e1f2011-10-07 14:23:17 +11007932 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
Guoqing Jiangd1d90142017-10-09 10:32:48 +08007933 set_bit(THREAD_WAKEUP, &thread->flags);
7934 wake_up(&thread->wqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007935 }
7936}
NeilBrown6c144d32014-09-30 16:15:38 +10007937EXPORT_SYMBOL(md_wakeup_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007938
Shaohua Li4ed87312012-10-11 13:34:00 +11007939struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7940 struct mddev *mddev, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941{
NeilBrown2b8bf342011-10-11 16:48:23 +11007942 struct md_thread *thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943
NeilBrown2b8bf342011-10-11 16:48:23 +11007944 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007945 if (!thread)
7946 return NULL;
7947
Linus Torvalds1da177e2005-04-16 15:20:36 -07007948 init_waitqueue_head(&thread->wqueue);
7949
Linus Torvalds1da177e2005-04-16 15:20:36 -07007950 thread->run = run;
7951 thread->mddev = mddev;
NeilBrown32a76272005-06-21 17:17:14 -07007952 thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown0da3c612009-09-23 18:09:45 +10007953 thread->tsk = kthread_run(md_thread, thread,
7954 "%s_%s",
7955 mdname(thread->mddev),
NeilBrown02326052012-07-03 15:56:52 +10007956 name);
NeilBrowna6fb0932005-09-09 16:23:56 -07007957 if (IS_ERR(thread->tsk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007958 kfree(thread);
7959 return NULL;
7960 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007961 return thread;
7962}
NeilBrown6c144d32014-09-30 16:15:38 +10007963EXPORT_SYMBOL(md_register_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007964
NeilBrown2b8bf342011-10-11 16:48:23 +11007965void md_unregister_thread(struct md_thread **threadp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007966{
NeilBrown2b8bf342011-10-11 16:48:23 +11007967 struct md_thread *thread = *threadp;
NeilBrowne0cf8f02009-03-31 14:39:39 +11007968 if (!thread)
7969 return;
NeilBrown36a4e1f2011-10-07 14:23:17 +11007970 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
NeilBrown01f96c02011-09-21 15:30:20 +10007971 /* Locking ensures that mddev_unlock does not wake_up a
7972 * non-existent thread
7973 */
7974 spin_lock(&pers_lock);
7975 *threadp = NULL;
7976 spin_unlock(&pers_lock);
NeilBrowna6fb0932005-09-09 16:23:56 -07007977
7978 kthread_stop(thread->tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979 kfree(thread);
7980}
NeilBrown6c144d32014-09-30 16:15:38 +10007981EXPORT_SYMBOL(md_unregister_thread);
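/*
 * Editor's illustration (not part of the driver): the usual lifecycle a
 * personality follows with md_register_thread(), md_wakeup_thread() and
 * md_unregister_thread() above, as a hedged sketch; the daemon body and
 * the surrounding personality are placeholders.
 */
#if 0	/* sketch only, kept out of the kernel build */
static void example_daemon(struct md_thread *thread)
{
	/* runs each time md_wakeup_thread() sets THREAD_WAKEUP */
	struct mddev *mddev = thread->mddev;

	/* ... process pending work for mddev ... */
}

static int example_run(struct mddev *mddev)
{
	mddev->thread = md_register_thread(example_daemon, mddev, "example");
	if (!mddev->thread)
		return -ENOMEM;
	md_wakeup_thread(mddev->thread);	/* kick the first pass */
	return 0;
}

static void example_free(struct mddev *mddev)
{
	md_unregister_thread(&mddev->thread);	/* also NULLs the pointer */
}
#endif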
Linus Torvalds1da177e2005-04-16 15:20:36 -07007982
NeilBrownfd01b882011-10-11 16:47:53 +11007983void md_error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007984{
NeilBrownb2d444d2005-11-08 21:39:31 -08007985 if (!rdev || test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007986 return;
Dan Williams6bfe0b42008-04-30 00:52:32 -07007987
NeilBrownde393cd2011-07-28 11:31:48 +10007988 if (!mddev->pers || !mddev->pers->error_handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007989 return;
 7990 mddev->pers->error_handler(mddev, rdev);
Neil Brown72a23c22008-06-28 08:31:41 +10007991 if (mddev->degraded)
7992 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown00bcb4a2010-06-01 19:37:23 +10007993 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007994 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7995 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7996 md_wakeup_thread(mddev->thread);
NeilBrown768a4182010-07-26 11:49:55 +10007997 if (mddev->event_work.func)
Tejun Heoe804ac72010-10-15 15:36:08 +02007998 queue_work(md_misc_wq, &mddev->event_work);
Guoqing Jiang54679482021-10-04 23:34:53 +08007999 md_new_event();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008000}
NeilBrown6c144d32014-09-30 16:15:38 +10008001EXPORT_SYMBOL(md_error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008002
8003/* seq_file implementation /proc/mdstat */
8004
8005static void status_unused(struct seq_file *seq)
8006{
8007 int i = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008008 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008009
8010 seq_printf(seq, "unused devices: ");
8011
Cheng Renquan159ec1f2009-01-09 08:31:08 +11008012 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008013 char b[BDEVNAME_SIZE];
8014 i++;
8015 seq_printf(seq, "%s ",
8016 bdevname(rdev->bdev,b));
8017 }
8018 if (!i)
8019 seq_printf(seq, "<none>");
8020
8021 seq_printf(seq, "\n");
8022}
8023
NeilBrownf7851be2015-07-02 17:12:58 +10008024static int status_resync(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008025{
NeilBrowndd71cf62009-05-07 12:49:35 +10008026 sector_t max_sectors, resync, res;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008027 unsigned long dt, db = 0;
8028 sector_t rt, curr_mark_cnt, resync_mark_cnt;
8029 int scale, recovery_active;
NeilBrown4588b422006-03-27 01:18:04 -08008030 unsigned int per_milli;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008031
NeilBrownc804cde2012-05-21 09:28:33 +10008032 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8033 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
NeilBrowndd71cf62009-05-07 12:49:35 +10008034 max_sectors = mddev->resync_max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008035 else
NeilBrowndd71cf62009-05-07 12:49:35 +10008036 max_sectors = mddev->dev_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008037
NeilBrownf7851be2015-07-02 17:12:58 +10008038 resync = mddev->curr_resync;
8039 if (resync <= 3) {
8040 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8041 /* Still cleaning up */
8042 resync = max_sectors;
Nate Daileyd2e2ec82017-11-30 11:33:30 -05008043 } else if (resync > max_sectors)
8044 resync = max_sectors;
8045 else
NeilBrownf7851be2015-07-02 17:12:58 +10008046 resync -= atomic_read(&mddev->recovery_active);
8047
8048 if (resync == 0) {
Guoqing Jiang0357ba22018-07-02 16:26:25 +08008049 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8050 struct md_rdev *rdev;
8051
8052 rdev_for_each(rdev, mddev)
8053 if (rdev->raid_disk >= 0 &&
8054 !test_bit(Faulty, &rdev->flags) &&
8055 rdev->recovery_offset != MaxSector &&
8056 rdev->recovery_offset) {
8057 seq_printf(seq, "\trecover=REMOTE");
8058 return 1;
8059 }
8060 if (mddev->reshape_position != MaxSector)
8061 seq_printf(seq, "\treshape=REMOTE");
8062 else
8063 seq_printf(seq, "\tresync=REMOTE");
8064 return 1;
8065 }
NeilBrownf7851be2015-07-02 17:12:58 +10008066 if (mddev->recovery_cp < MaxSector) {
8067 seq_printf(seq, "\tresync=PENDING");
8068 return 1;
8069 }
8070 return 0;
8071 }
8072 if (resync < 3) {
8073 seq_printf(seq, "\tresync=DELAYED");
8074 return 1;
8075 }
8076
NeilBrown403df472014-09-30 15:52:29 +10008077 WARN_ON(max_sectors == 0);
NeilBrown4588b422006-03-27 01:18:04 -08008078 /* Pick 'scale' such that (resync>>scale)*1000 will fit
NeilBrowndd71cf62009-05-07 12:49:35 +10008079 * in a sector_t, and (max_sectors>>scale) will fit in a
NeilBrown4588b422006-03-27 01:18:04 -08008080 * u32, as those are the requirements for sector_div.
8081 * Thus 'scale' must be at least 10
8082 */
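 /*
  * Editor's note: worked example of the scaling above. For a 4 TiB
  * member device (max_sectors = 2^33), scale stays at 10:
  * max_sectors >> 10 is 2^23, comfortably a u32 for sector_div(), and
  * (resync >> 10) * 1000 is at most about 2^33, well inside a sector_t.
  * The resulting 'res'/'per_milli' is progress in tenths of a percent.
  */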
8083 scale = 10;
8084 if (sizeof(sector_t) > sizeof(unsigned long)) {
NeilBrowndd71cf62009-05-07 12:49:35 +10008085 while ( max_sectors/2 > (1ULL<<(scale+32)))
NeilBrown4588b422006-03-27 01:18:04 -08008086 scale++;
8087 }
8088 res = (resync>>scale)*1000;
NeilBrowndd71cf62009-05-07 12:49:35 +10008089 sector_div(res, (u32)((max_sectors>>scale)+1));
NeilBrown4588b422006-03-27 01:18:04 -08008090
8091 per_milli = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008092 {
NeilBrown4588b422006-03-27 01:18:04 -08008093 int i, x = per_milli/50, y = 20-x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008094 seq_printf(seq, "[");
8095 for (i = 0; i < x; i++)
8096 seq_printf(seq, "=");
8097 seq_printf(seq, ">");
8098 for (i = 0; i < y; i++)
8099 seq_printf(seq, ".");
8100 seq_printf(seq, "] ");
8101 }
NeilBrown4588b422006-03-27 01:18:04 -08008102 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
NeilBrownccfcc3c2006-03-27 01:18:09 -08008103 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8104 "reshape" :
NeilBrown61df9d92006-10-03 01:15:57 -07008105 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8106 "check" :
8107 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8108 "resync" : "recovery"))),
8109 per_milli/10, per_milli % 10,
NeilBrowndd71cf62009-05-07 12:49:35 +10008110 (unsigned long long) resync/2,
8111 (unsigned long long) max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008112
8113 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07008114 * dt: time from mark until now
8115 * db: blocks written from mark until now
8116 * rt: remaining time
NeilBrowndd71cf62009-05-07 12:49:35 +10008117 *
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008118 * rt is a sector_t, which is always 64bit now. We are keeping
8119 * the original algorithm, but it is not really necessary.
8120 *
8121 * Original algorithm:
8122 * So we divide before multiply in case it is 32bit and close
8123 * to the limit.
8124 * We scale the divisor (db) by 32 to avoid losing precision
8125 * near the end of resync when the number of remaining sectors
8126 * is close to 'db'.
8127 * We then divide rt by 32 after multiplying by db to compensate.
8128 * The '+1' avoids division by zero if db is very small.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008129 */
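 /*
  * Editor's note: a worked instance of the estimate above. If dt = 30s
  * and db = 3,000,000 sectors were written in that window (~50 MB/s,
  * matching the "speed=" figure of db/2/dt K/sec below), with
  * 1,000,000,000 sectors still to go, then rt = 1e9 / (3e6/32 + 1) =
  * 10666, times dt gives 319,980, and shifting right by 5 gives ~9999
  * seconds, i.e. the plain remaining/rate answer (10000s) up to
  * integer rounding.
  */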
8130 dt = ((jiffies - mddev->resync_mark) / HZ);
8131 if (!dt) dt++;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008132
8133 curr_mark_cnt = mddev->curr_mark_cnt;
8134 recovery_active = atomic_read(&mddev->recovery_active);
8135 resync_mark_cnt = mddev->resync_mark_cnt;
8136
8137 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8138 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008139
NeilBrowndd71cf62009-05-07 12:49:35 +10008140 rt = max_sectors - resync; /* number of remaining sectors */
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008141 rt = div64_u64(rt, db/32+1);
NeilBrowndd71cf62009-05-07 12:49:35 +10008142 rt *= dt;
8143 rt >>= 5;
8144
8145 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8146 ((unsigned long)rt % 60)/6);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008147
NeilBrownff4e8d92006-07-10 04:44:16 -07008148 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
NeilBrownf7851be2015-07-02 17:12:58 +10008149 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008150}
8151
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct list_head *tmp;
        loff_t l = *pos;
        struct mddev *mddev;

        if (l == 0x10000) {
                ++*pos;
                return (void *)2;
        }
        if (l > 0x10000)
                return NULL;
        if (!l--)
                /* header */
                return (void*)1;

        spin_lock(&all_mddevs_lock);
        list_for_each(tmp, &all_mddevs)
                if (!l--) {
                        mddev = list_entry(tmp, struct mddev, all_mddevs);
                        mddev_get(mddev);
                        spin_unlock(&all_mddevs_lock);
                        return mddev;
                }
        spin_unlock(&all_mddevs_lock);
        if (!l--)
                return (void*)2; /* tail */
        return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct list_head *tmp;
        struct mddev *next_mddev, *mddev = v;

        ++*pos;
        if (v == (void*)2)
                return NULL;

        spin_lock(&all_mddevs_lock);
        if (v == (void*)1)
                tmp = all_mddevs.next;
        else
                tmp = mddev->all_mddevs.next;
        if (tmp != &all_mddevs)
                next_mddev = mddev_get(list_entry(tmp, struct mddev, all_mddevs));
        else {
                next_mddev = (void*)2;
                *pos = 0x10000;
        }
        spin_unlock(&all_mddevs_lock);

        if (v != (void*)1)
                mddev_put(mddev);
        return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
        struct mddev *mddev = v;

        if (mddev && v != (void*)1 && v != (void*)2)
                mddev_put(mddev);
}

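/*
 * md_seq_show() renders one array per cursor in the familiar
 * /proc/mdstat layout; the per-device markers are (W)rite-mostly,
 * (J)ournal, (F)aulty, (S)pare and (R)eplacement.  Illustrative
 * output (not taken from any particular machine):
 *
 *   md0 : active raid1 sdb1[1] sda1[0]
 *         1047552 blocks super 1.2 [2/2] [UU]
 */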
static int md_seq_show(struct seq_file *seq, void *v)
{
        struct mddev *mddev = v;
        sector_t sectors;
        struct md_rdev *rdev;

        if (v == (void*)1) {
                struct md_personality *pers;
                seq_printf(seq, "Personalities : ");
                spin_lock(&pers_lock);
                list_for_each_entry(pers, &pers_list, list)
                        seq_printf(seq, "[%s] ", pers->name);

                spin_unlock(&pers_lock);
                seq_printf(seq, "\n");
                seq->poll_event = atomic_read(&md_event_count);
                return 0;
        }
        if (v == (void*)2) {
                status_unused(seq);
                return 0;
        }

        spin_lock(&mddev->lock);
        if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
                seq_printf(seq, "%s : %sactive", mdname(mddev),
                           mddev->pers ? "" : "in");
                if (mddev->pers) {
                        if (mddev->ro == 1)
                                seq_printf(seq, " (read-only)");
                        if (mddev->ro == 2)
                                seq_printf(seq, " (auto-read-only)");
                        seq_printf(seq, " %s", mddev->pers->name);
                }

                sectors = 0;
                rcu_read_lock();
                rdev_for_each_rcu(rdev, mddev) {
                        char b[BDEVNAME_SIZE];
                        seq_printf(seq, " %s[%d]",
                                   bdevname(rdev->bdev, b), rdev->desc_nr);
                        if (test_bit(WriteMostly, &rdev->flags))
                                seq_printf(seq, "(W)");
                        if (test_bit(Journal, &rdev->flags))
                                seq_printf(seq, "(J)");
                        if (test_bit(Faulty, &rdev->flags)) {
                                seq_printf(seq, "(F)");
                                continue;
                        }
                        if (rdev->raid_disk < 0)
                                seq_printf(seq, "(S)"); /* spare */
                        if (test_bit(Replacement, &rdev->flags))
                                seq_printf(seq, "(R)");
                        sectors += rdev->sectors;
                }
                rcu_read_unlock();

                if (!list_empty(&mddev->disks)) {
                        if (mddev->pers)
                                seq_printf(seq, "\n      %llu blocks",
                                           (unsigned long long)
                                           mddev->array_sectors / 2);
                        else
                                seq_printf(seq, "\n      %llu blocks",
                                           (unsigned long long)sectors / 2);
                }
                if (mddev->persistent) {
                        if (mddev->major_version != 0 ||
                            mddev->minor_version != 90) {
                                seq_printf(seq, " super %d.%d",
                                           mddev->major_version,
                                           mddev->minor_version);
                        }
                } else if (mddev->external)
                        seq_printf(seq, " super external:%s",
                                   mddev->metadata_type);
                else
                        seq_printf(seq, " super non-persistent");

                if (mddev->pers) {
                        mddev->pers->status(seq, mddev);
                        seq_printf(seq, "\n      ");
                        if (mddev->pers->sync_request) {
                                if (status_resync(seq, mddev))
                                        seq_printf(seq, "\n      ");
                        }
                } else
                        seq_printf(seq, "\n       ");

                md_bitmap_status(seq, mddev->bitmap);

                seq_printf(seq, "\n");
        }
        spin_unlock(&mddev->lock);

        return 0;
}

static const struct seq_operations md_seq_ops = {
        .start  = md_seq_start,
        .next   = md_seq_next,
        .stop   = md_seq_stop,
        .show   = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        int error;

        error = seq_open(file, &md_seq_ops);
        if (error)
                return error;

        seq = file->private_data;
        seq->poll_event = atomic_read(&md_event_count);
        return error;
}

static int md_unloading;
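/*
 * Poll support lets a monitor (e.g. mdadm --monitor) sleep on
 * /proc/mdstat: md_new_event() bumps md_event_count and wakes
 * md_event_waiters, and a reader whose cached poll_event no longer
 * matches the counter gets EPOLLERR|EPOLLPRI so it knows to rescan.
 */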
static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
{
        struct seq_file *seq = filp->private_data;
        __poll_t mask;

        if (md_unloading)
                return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
        poll_wait(filp, &md_event_waiters, wait);

        /* always allow read */
        mask = EPOLLIN | EPOLLRDNORM;

        if (seq->poll_event != atomic_read(&md_event_count))
                mask |= EPOLLERR | EPOLLPRI;
        return mask;
}

static const struct proc_ops mdstat_proc_ops = {
        .proc_open      = md_seq_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = seq_release,
        .proc_poll      = mdstat_poll,
};

int register_md_personality(struct md_personality *p)
{
        pr_debug("md: %s personality registered for level %d\n",
                 p->name, p->level);
        spin_lock(&pers_lock);
        list_add_tail(&p->list, &pers_list);
        spin_unlock(&pers_lock);
        return 0;
}
EXPORT_SYMBOL(register_md_personality);

int unregister_md_personality(struct md_personality *p)
{
        pr_debug("md: %s personality unregistered\n", p->name);
        spin_lock(&pers_lock);
        list_del_init(&p->list);
        spin_unlock(&pers_lock);
        return 0;
}
EXPORT_SYMBOL(unregister_md_personality);

int register_md_cluster_operations(struct md_cluster_operations *ops,
                                   struct module *module)
{
        int ret = 0;
        spin_lock(&pers_lock);
        if (md_cluster_ops != NULL)
                ret = -EALREADY;
        else {
                md_cluster_ops = ops;
                md_cluster_mod = module;
        }
        spin_unlock(&pers_lock);
        return ret;
}
EXPORT_SYMBOL(register_md_cluster_operations);

int unregister_md_cluster_operations(void)
{
        spin_lock(&pers_lock);
        md_cluster_ops = NULL;
        spin_unlock(&pers_lock);
        return 0;
}
EXPORT_SYMBOL(unregister_md_cluster_operations);

int md_setup_cluster(struct mddev *mddev, int nodes)
{
        int ret;
        if (!md_cluster_ops)
                request_module("md-cluster");
        spin_lock(&pers_lock);
        /* ensure module won't be unloaded */
        if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
                pr_warn("can't find md-cluster module or get its reference.\n");
                spin_unlock(&pers_lock);
                return -ENOENT;
        }
        spin_unlock(&pers_lock);

        ret = md_cluster_ops->join(mddev, nodes);
        if (!ret)
                mddev->safemode_delay = 0;
        return ret;
}

void md_cluster_stop(struct mddev *mddev)
{
        if (!md_cluster_ops)
                return;
        md_cluster_ops->leave(mddev);
        module_put(md_cluster_mod);
}

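/*
 * Idle detection compares each member disk's block-layer sector
 * counters against disk->sync_io, which personalities bump for
 * resync/recovery IO (via md_sync_acct() in md.h), so only non-sync
 * activity makes the array look busy.
 */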
static int is_mddev_idle(struct mddev *mddev, int init)
{
        struct md_rdev *rdev;
        int idle;
        int curr_events;

        idle = 1;
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev) {
                struct gendisk *disk = rdev->bdev->bd_disk;
                curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
                              atomic_read(&disk->sync_io);
                /* sync IO will cause sync_io to increase before the disk_stats
                 * as sync_io is counted when a request starts, and
                 * disk_stats is counted when it completes.
                 * So resync activity will cause curr_events to be smaller than
                 * when there was no such activity.
                 * non-sync IO will cause disk_stats to increase without
                 * increasing sync_io so curr_events will (eventually)
                 * be larger than it was before.  Once it becomes
                 * substantially larger, the test below will cause
                 * the array to appear non-idle, and resync will slow
                 * down.
                 * If there is a lot of outstanding resync activity when
                 * we set last_events to curr_events, then all that activity
                 * completing might cause the array to appear non-idle
                 * and resync will be slowed down even though there might
                 * not have been non-resync activity.  This will only
                 * happen once though.  'last_events' will soon reflect
                 * the state where there are few or no outstanding
                 * resync requests, and further resync activity will
                 * always make curr_events less than last_events.
                 */
                if (init || curr_events - rdev->last_events > 64) {
                        rdev->last_events = curr_events;
                        idle = 0;
                }
        }
        rcu_read_unlock();
        return idle;
}

void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
        /* another "blocks" (512byte) blocks have been synced */
        atomic_sub(blocks, &mddev->recovery_active);
        wake_up(&mddev->recovery_wait);
        if (!ok) {
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                /* stop recovery, signal do_sync */
        }
}
EXPORT_SYMBOL(md_done_sync);

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 * A return value of 'false' means that the write wasn't recorded
 * and cannot proceed as the array is being suspended.
 */
bool md_write_start(struct mddev *mddev, struct bio *bi)
{
        int did_change = 0;

        if (bio_data_dir(bi) != WRITE)
                return true;

        BUG_ON(mddev->ro == 1);
        if (mddev->ro == 2) {
                /* need to switch to read/write */
                mddev->ro = 0;
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                md_wakeup_thread(mddev->sync_thread);
                did_change = 1;
        }
        rcu_read_lock();
        percpu_ref_get(&mddev->writes_pending);
        smp_mb(); /* Match smp_mb in set_in_sync() */
        if (mddev->safemode == 1)
                mddev->safemode = 0;
        /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
        if (mddev->in_sync || mddev->sync_checkers) {
                spin_lock(&mddev->lock);
                if (mddev->in_sync) {
                        mddev->in_sync = 0;
                        set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
                        set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                        md_wakeup_thread(mddev->thread);
                        did_change = 1;
                }
                spin_unlock(&mddev->lock);
        }
        rcu_read_unlock();
        if (did_change)
                sysfs_notify_dirent_safe(mddev->sysfs_state);
        if (!mddev->has_superblocks)
                return true;
        wait_event(mddev->sb_wait,
                   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
                   mddev->suspended);
        if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
                percpu_ref_put(&mddev->writes_pending);
                return false;
        }
        return true;
}
EXPORT_SYMBOL(md_write_start);

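/*
 * A minimal sketch of how a personality pairs these calls in its
 * write path (illustrative only; real callers such as raid1 add
 * their own retry and plugging logic):
 *
 *      if (!md_write_start(mddev, bio))
 *              return false;   // array suspended, block layer retries
 *      ...submit the write, possibly splitting it...
 *      md_write_end(mddev);    // from the bio's completion path
 */
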
/* md_write_inc can only be called when md_write_start() has
 * already been called at least once on the current request.
 * It increments the counter and is useful when a single request
 * is split into several parts.  Each part causes an increment and
 * so needs a matching md_write_end().
 * Unlike md_write_start(), it is safe to call md_write_inc() inside
 * a spinlocked region.
 */
void md_write_inc(struct mddev *mddev, struct bio *bi)
{
        if (bio_data_dir(bi) != WRITE)
                return;
        WARN_ON_ONCE(mddev->in_sync || mddev->ro);
        percpu_ref_get(&mddev->writes_pending);
}
EXPORT_SYMBOL(md_write_inc);

void md_write_end(struct mddev *mddev)
{
        percpu_ref_put(&mddev->writes_pending);

        if (mddev->safemode == 2)
                md_wakeup_thread(mddev->thread);
        else if (mddev->safemode_delay)
                /* The roundup() ensures this only performs locking once
                 * every ->safemode_delay jiffies
                 */
                mod_timer(&mddev->safemode_timer,
                          roundup(jiffies, mddev->safemode_delay) +
                          mddev->safemode_delay);
}
EXPORT_SYMBOL(md_write_end);

/* This is used by raid0 and raid10 */
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
                           struct bio *bio, sector_t start, sector_t size)
{
        struct bio *discard_bio = NULL;

        if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0,
                                   &discard_bio) || !discard_bio)
                return;

        bio_chain(discard_bio, bio);
        bio_clone_blkg_association(discard_bio, bio);
        if (mddev->gendisk)
                trace_block_bio_remap(discard_bio,
                                      disk_devt(mddev->gendisk),
                                      bio->bi_iter.bi_sector);
        submit_bio_noacct(discard_bio);
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);

int acct_bioset_init(struct mddev *mddev)
{
        int err = 0;

        if (!bioset_initialized(&mddev->io_acct_set))
                err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
                                  offsetof(struct md_io_acct, bio_clone), 0);
        return err;
}
EXPORT_SYMBOL_GPL(acct_bioset_init);

void acct_bioset_exit(struct mddev *mddev)
{
        bioset_exit(&mddev->io_acct_set);
}
EXPORT_SYMBOL_GPL(acct_bioset_exit);

static void md_end_io_acct(struct bio *bio)
{
        struct md_io_acct *md_io_acct = bio->bi_private;
        struct bio *orig_bio = md_io_acct->orig_bio;

        orig_bio->bi_status = bio->bi_status;

        bio_end_io_acct(orig_bio, md_io_acct->start_time);
        bio_put(bio);
        bio_endio(orig_bio);
}

/*
 * Used by personalities that don't already clone the bio and thus can't
 * easily add the timestamp to their extended bio structure.
 */
void md_account_bio(struct mddev *mddev, struct bio **bio)
{
        struct md_io_acct *md_io_acct;
        struct bio *clone;

        if (!blk_queue_io_stat((*bio)->bi_bdev->bd_disk->queue))
                return;

        clone = bio_clone_fast(*bio, GFP_NOIO, &mddev->io_acct_set);
        md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
        md_io_acct->orig_bio = *bio;
        md_io_acct->start_time = bio_start_io_acct(*bio);

        clone->bi_end_io = md_end_io_acct;
        clone->bi_private = md_io_acct;
        *bio = clone;
}
EXPORT_SYMBOL_GPL(md_account_bio);
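
/*
 * The caller passes its bio pointer by reference and continues with
 * the returned clone; md_end_io_acct() later finishes the accounting
 * and ends the original bio.  Sketch of a call site (hypothetical
 * surrounding code):
 *
 *      struct bio *bio = ...;
 *      md_account_bio(mddev, &bio);    // bio may now point at the clone
 *      ...remap and submit bio...
 */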

/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(struct mddev *mddev)
{
        if (!mddev->pers)
                return;
        if (mddev->ro)
                return;
        if (!mddev->pers->sync_request)
                return;

        spin_lock(&mddev->lock);
        if (mddev->in_sync) {
                mddev->in_sync = 0;
                set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
                set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                if (mddev->safemode_delay &&
                    mddev->safemode == 0)
                        mddev->safemode = 1;
                spin_unlock(&mddev->lock);
                md_update_sb(mddev, 0);
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                /* wait for the dirty state to be recorded in the metadata */
                wait_event(mddev->sb_wait,
                           !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
        } else
                spin_unlock(&mddev->lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);

#define SYNC_MARKS      10
#define SYNC_MARK_STEP  (3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)
void md_do_sync(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;
        struct mddev *mddev2;
        unsigned int currspeed = 0, window;
        sector_t max_sectors, j, io_sectors, recovery_done;
        unsigned long mark[SYNC_MARKS];
        unsigned long update_time;
        sector_t mark_cnt[SYNC_MARKS];
        int last_mark, m;
        struct list_head *tmp;
        sector_t last_check;
        int skipped = 0;
        struct md_rdev *rdev;
        char *desc, *action = NULL;
        struct blk_plug plug;
        int ret;

        /* just in case thread restarts... */
        if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
            test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
                return;
        if (mddev->ro) { /* never try to sync a read-only array */
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                return;
        }

        if (mddev_is_clustered(mddev)) {
                ret = md_cluster_ops->resync_start(mddev);
                if (ret)
                        goto skip;

                set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
                if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
                      test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
                      test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
                    && ((unsigned long long)mddev->curr_resync_completed
                        < (unsigned long long)mddev->resync_max_sectors))
                        goto skip;
        }

        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
                        desc = "data-check";
                        action = "check";
                } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                        desc = "requested-resync";
                        action = "repair";
                } else
                        desc = "resync";
        } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                desc = "reshape";
        else
                desc = "recovery";

        mddev->last_sync_action = action ?: desc;

        /* we overload curr_resync somewhat here.
         * 0 == not engaged in resync at all
         * 2 == checking that there is no conflict with another sync
         * 1 == like 2, but have yielded to allow conflicting resync to
         *              commence
         * other == active in resync - this many blocks
         *
         * Before starting a resync we must have set curr_resync to
         * 2, and then checked that every "conflicting" array has curr_resync
         * less than ours.  When we find one that is the same or higher
         * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
         * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
         * This will mean we have to start checking from the beginning again.
         *
         */

        do {
                int mddev2_minor = -1;
                mddev->curr_resync = 2;

        try_again:
                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        goto skip;
                for_each_mddev(mddev2, tmp) {
                        if (mddev2 == mddev)
                                continue;
                        if (!mddev->parallel_resync
                        &&  mddev2->curr_resync
                        &&  match_mddev_units(mddev, mddev2)) {
                                DEFINE_WAIT(wq);
                                if (mddev < mddev2 && mddev->curr_resync == 2) {
                                        /* arbitrarily yield */
                                        mddev->curr_resync = 1;
                                        wake_up(&resync_wait);
                                }
                                if (mddev > mddev2 && mddev->curr_resync == 1)
                                        /* no need to wait here, we can wait the next
                                         * time 'round when curr_resync == 2
                                         */
                                        continue;
                                /* We need to wait 'interruptible' so as not to
                                 * contribute to the load average, and not to
                                 * be caught by 'softlockup'
                                 */
                                prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
                                if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
                                        if (mddev2_minor != mddev2->md_minor) {
                                                mddev2_minor = mddev2->md_minor;
                                                pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
                                                        desc, mdname(mddev),
                                                        mdname(mddev2));
                                        }
                                        mddev_put(mddev2);
                                        if (signal_pending(current))
                                                flush_signals(current);
                                        schedule();
                                        finish_wait(&resync_wait, &wq);
                                        goto try_again;
                                }
                                finish_wait(&resync_wait, &wq);
                        }
                }
        } while (mddev->curr_resync < 2);

        j = 0;
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                /* resync follows the size requested by the personality,
                 * which defaults to physical size, but can be virtual size
                 */
                max_sectors = mddev->resync_max_sectors;
                atomic64_set(&mddev->resync_mismatches, 0);
                /* we don't use the checkpoint if there's a bitmap */
                if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                        j = mddev->resync_min;
                else if (!mddev->bitmap)
                        j = mddev->recovery_cp;

        } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
                max_sectors = mddev->resync_max_sectors;
                /*
                 * If the original node aborts reshaping then we continue the
                 * reshaping, so set j again to avoid restarting the reshape
                 * from the very beginning
                 */
                if (mddev_is_clustered(mddev) &&
                    mddev->reshape_position != MaxSector)
                        j = mddev->reshape_position;
        } else {
                /* recovery follows the physical size of devices */
                max_sectors = mddev->dev_sectors;
                j = MaxSector;
                rcu_read_lock();
                rdev_for_each_rcu(rdev, mddev)
                        if (rdev->raid_disk >= 0 &&
                            !test_bit(Journal, &rdev->flags) &&
                            !test_bit(Faulty, &rdev->flags) &&
                            !test_bit(In_sync, &rdev->flags) &&
                            rdev->recovery_offset < j)
                                j = rdev->recovery_offset;
                rcu_read_unlock();

                /* If there is a bitmap, we need to make sure all
                 * writes that started before we added a spare
                 * complete before we start doing a recovery.
                 * Otherwise the write might complete and (via
                 * bitmap_endwrite) set a bit in the bitmap after the
                 * recovery has checked that bit and skipped that
                 * region.
                 */
                if (mddev->bitmap) {
                        mddev->pers->quiesce(mddev, 1);
                        mddev->pers->quiesce(mddev, 0);
                }
        }

        pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
        pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
        pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
                 speed_max(mddev), desc);

        is_mddev_idle(mddev, 1); /* this initializes IO event counters */

        io_sectors = 0;
        for (m = 0; m < SYNC_MARKS; m++) {
                mark[m] = jiffies;
                mark_cnt[m] = io_sectors;
        }
        last_mark = 0;
        mddev->resync_mark = mark[last_mark];
        mddev->resync_mark_cnt = mark_cnt[last_mark];

        /*
         * Tune reconstruction:
         */
        window = 32 * (PAGE_SIZE / 512);
        pr_debug("md: using %dk window, over a total of %lluk.\n",
                 window/2, (unsigned long long)max_sectors/2);
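        /*
         * For example, with 4 KiB pages the window is 32 * 8 = 256
         * sectors, which the debug line above reports as a 128k window.
         */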

        atomic_set(&mddev->recovery_active, 0);
        last_check = 0;

        if (j > 2) {
                pr_debug("md: resuming %s of %s from checkpoint.\n",
                         desc, mdname(mddev));
                mddev->curr_resync = j;
        } else
                mddev->curr_resync = 3; /* no longer delayed */
        mddev->curr_resync_completed = j;
        sysfs_notify_dirent_safe(mddev->sysfs_completed);
        md_new_event();
        update_time = jiffies;

        blk_start_plug(&plug);
        while (j < max_sectors) {
                sector_t sectors;

                skipped = 0;

                if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
                    ((mddev->curr_resync > mddev->curr_resync_completed &&
                      (mddev->curr_resync - mddev->curr_resync_completed)
                      > (max_sectors >> 4)) ||
                     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
                     (j - mddev->curr_resync_completed)*2
                     >= mddev->resync_max - mddev->curr_resync_completed ||
                     mddev->curr_resync_completed > mddev->resync_max
                            )) {
                        /* time to update curr_resync_completed */
                        wait_event(mddev->recovery_wait,
                                   atomic_read(&mddev->recovery_active) == 0);
                        mddev->curr_resync_completed = j;
                        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
                            j > mddev->recovery_cp)
                                mddev->recovery_cp = j;
                        update_time = jiffies;
                        set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
                        sysfs_notify_dirent_safe(mddev->sysfs_completed);
                }

                while (j >= mddev->resync_max &&
                       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                        /* As this condition is controlled by user-space,
                         * we can block indefinitely, so use '_interruptible'
                         * to avoid triggering warnings.
                         */
                        flush_signals(current); /* just in case */
                        wait_event_interruptible(mddev->recovery_wait,
                                                 mddev->resync_max > j
                                                 || test_bit(MD_RECOVERY_INTR,
                                                             &mddev->recovery));
                }

                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        break;

                sectors = mddev->pers->sync_request(mddev, j, &skipped);
                if (sectors == 0) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        break;
                }

                if (!skipped) { /* actual IO requested */
                        io_sectors += sectors;
                        atomic_add(sectors, &mddev->recovery_active);
                }

                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        break;

                j += sectors;
                if (j > max_sectors)
                        /* when skipping, extra large numbers can be returned. */
                        j = max_sectors;
                if (j > 2)
                        mddev->curr_resync = j;
                mddev->curr_mark_cnt = io_sectors;
                if (last_check == 0)
                        /* this is the earliest that rebuild will be
                         * visible in /proc/mdstat
                         */
                        md_new_event();

                if (last_check + window > io_sectors || j == max_sectors)
                        continue;

                last_check = io_sectors;
        repeat:
                if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
                        /* step marks */
                        int next = (last_mark+1) % SYNC_MARKS;

                        mddev->resync_mark = mark[next];
                        mddev->resync_mark_cnt = mark_cnt[next];
                        mark[next] = jiffies;
                        mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
                        last_mark = next;
                }

                if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        break;

                /*
                 * this loop exits only if we are slower than the 'hard'
                 * speed limit, or the system was IO-idle for a jiffy.
                 * the system might be non-idle CPU-wise, but we only care
                 * about not overloading the IO subsystem. (things like an
                 * e2fsck being done on the RAID array should execute fast)
                 */
                cond_resched();

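                /*
                 * Worked example (illustrative numbers): 20,000 sectors
                 * progressed in the ~10 seconds since resync_mark give
                 * currspeed ~= 20000/2/(10+1)+1 ~= 910 KB/sec, which is
                 * then throttled against speed_min()/speed_max() below.
                 */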
                recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
                currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
                        /((jiffies-mddev->resync_mark)/HZ +1) +1;

                if (currspeed > speed_min(mddev)) {
                        if (currspeed > speed_max(mddev)) {
                                msleep(500);
                                goto repeat;
                        }
                        if (!is_mddev_idle(mddev, 0)) {
                                /*
                                 * Give other IO more of a chance.
                                 * The faster the devices, the less we wait.
                                 */
                                wait_event(mddev->recovery_wait,
                                           !atomic_read(&mddev->recovery_active));
                        }
                }
        }
        pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
                test_bit(MD_RECOVERY_INTR, &mddev->recovery)
                ? "interrupted" : "done");
        /*
         * this also signals 'finished resyncing' to md_stop
         */
        blk_finish_plug(&plug);
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

        if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            mddev->curr_resync > 3) {
                mddev->curr_resync_completed = mddev->curr_resync;
                sysfs_notify_dirent_safe(mddev->sysfs_completed);
        }
        mddev->pers->sync_request(mddev, max_sectors, &skipped);

        if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
            mddev->curr_resync > 3) {
                if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                        if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                                if (mddev->curr_resync >= mddev->recovery_cp) {
                                        pr_debug("md: checkpointing %s of %s.\n",
                                                 desc, mdname(mddev));
                                        if (test_bit(MD_RECOVERY_ERROR,
                                                     &mddev->recovery))
                                                mddev->recovery_cp =
                                                        mddev->curr_resync_completed;
                                        else
                                                mddev->recovery_cp =
                                                        mddev->curr_resync;
                                }
                        } else
                                mddev->recovery_cp = MaxSector;
                } else {
                        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                                mddev->curr_resync = MaxSector;
                        if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
                            test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
                                rcu_read_lock();
                                rdev_for_each_rcu(rdev, mddev)
                                        if (rdev->raid_disk >= 0 &&
                                            mddev->delta_disks >= 0 &&
                                            !test_bit(Journal, &rdev->flags) &&
                                            !test_bit(Faulty, &rdev->flags) &&
                                            !test_bit(In_sync, &rdev->flags) &&
                                            rdev->recovery_offset < mddev->curr_resync)
                                                rdev->recovery_offset = mddev->curr_resync;
                                rcu_read_unlock();
                        }
                }
        }
 skip:
        /* set CHANGE_PENDING here since maybe another update is needed,
         * so other nodes are informed. It should be harmless for normal
         * raid */
        set_mask_bits(&mddev->sb_flags, 0,
                      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));

        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            mddev->delta_disks > 0 &&
            mddev->pers->finish_reshape &&
            mddev->pers->size &&
            mddev->queue) {
                mddev_lock_nointr(mddev);
                md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
                mddev_unlock(mddev);
                if (!mddev_is_clustered(mddev))
                        set_capacity_and_notify(mddev->gendisk,
                                                mddev->array_sectors);
        }

        spin_lock(&mddev->lock);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                /* We completed so min/max setting can be forgotten if used. */
                if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                        mddev->resync_min = 0;
                mddev->resync_max = MaxSector;
        } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                mddev->resync_min = mddev->curr_resync_completed;
        set_bit(MD_RECOVERY_DONE, &mddev->recovery);
        mddev->curr_resync = 0;
        spin_unlock(&mddev->lock);

        wake_up(&resync_wait);
        md_wakeup_thread(mddev->thread);
        return;
}
EXPORT_SYMBOL_GPL(md_do_sync);

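/*
 * Hot-plug housekeeping, done in two passes: first tag every Faulty,
 * unblocked, idle device RemoveSynchronized so that one
 * synchronize_rcu() covers them all, then detach those devices and
 * try to re-add any remaining device that could serve as a spare.
 */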
static int remove_and_add_spares(struct mddev *mddev,
                                 struct md_rdev *this)
{
        struct md_rdev *rdev;
        int spares = 0;
        int removed = 0;
        bool remove_some = false;

        if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                /* Mustn't remove devices when resync thread is running */
                return 0;

        rdev_for_each(rdev, mddev) {
                if ((this == NULL || rdev == this) &&
                    rdev->raid_disk >= 0 &&
                    !test_bit(Blocked, &rdev->flags) &&
                    test_bit(Faulty, &rdev->flags) &&
                    atomic_read(&rdev->nr_pending) == 0) {
                        /* Faulty non-Blocked devices with nr_pending == 0
                         * never get nr_pending incremented,
                         * never get Faulty cleared, and never get Blocked set.
                         * So we can synchronize_rcu now rather than once per device
                         */
                        remove_some = true;
                        set_bit(RemoveSynchronized, &rdev->flags);
                }
        }

        if (remove_some)
                synchronize_rcu();
        rdev_for_each(rdev, mddev) {
                if ((this == NULL || rdev == this) &&
                    rdev->raid_disk >= 0 &&
                    !test_bit(Blocked, &rdev->flags) &&
                    ((test_bit(RemoveSynchronized, &rdev->flags) ||
                      (!test_bit(In_sync, &rdev->flags) &&
                       !test_bit(Journal, &rdev->flags))) &&
                     atomic_read(&rdev->nr_pending) == 0)) {
                        if (mddev->pers->hot_remove_disk(
                                    mddev, rdev) == 0) {
                                sysfs_unlink_rdev(mddev, rdev);
                                rdev->saved_raid_disk = rdev->raid_disk;
                                rdev->raid_disk = -1;
                                removed++;
                        }
                }
                if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
                        clear_bit(RemoveSynchronized, &rdev->flags);
        }

        if (removed && mddev->kobj.sd)
                sysfs_notify_dirent_safe(mddev->sysfs_degraded);

        if (this && removed)
                goto no_add;

        rdev_for_each(rdev, mddev) {
                if (this && this != rdev)
                        continue;
                if (test_bit(Candidate, &rdev->flags))
                        continue;
                if (rdev->raid_disk >= 0 &&
                    !test_bit(In_sync, &rdev->flags) &&
                    !test_bit(Journal, &rdev->flags) &&
                    !test_bit(Faulty, &rdev->flags))
                        spares++;
                if (rdev->raid_disk >= 0)
                        continue;
                if (test_bit(Faulty, &rdev->flags))
                        continue;
                if (!test_bit(Journal, &rdev->flags)) {
                        if (mddev->ro &&
                            ! (rdev->saved_raid_disk >= 0 &&
                               !test_bit(Bitmap_sync, &rdev->flags)))
                                continue;

                        rdev->recovery_offset = 0;
                }
                if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
                        /* failure here is OK */
                        sysfs_link_rdev(mddev, rdev);
                        if (!test_bit(Journal, &rdev->flags))
                                spares++;
                        md_new_event();
                        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                }
        }
no_add:
        if (removed)
                set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        return spares;
}

static void md_start_sync(struct work_struct *ws)
{
        struct mddev *mddev = container_of(ws, struct mddev, del_work);

        mddev->sync_thread = md_register_thread(md_do_sync,
                                                mddev,
                                                "resync");
        if (!mddev->sync_thread) {
                pr_warn("%s: could not start resync thread...\n",
                        mdname(mddev));
                /* leave the spares where they are, it shouldn't hurt */
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
                clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                wake_up(&resync_wait);
                if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                       &mddev->recovery))
                        if (mddev->sysfs_action)
                                sysfs_notify_dirent_safe(mddev->sysfs_action);
        } else
                md_wakeup_thread(mddev->sync_thread);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
        md_new_event();
}
9231
Linus Torvalds1da177e2005-04-16 15:20:36 -07009232/*
9233 * This routine is regularly called by all per-raid-array threads to
9234 * deal with generic issues like resync and super-block update.
9235 * Raid personalities that don't have a thread (linear/raid0) do not
9236 * need this as they never do any recovery or update the superblock.
9237 *
9238 * It does not do any resync itself, but rather "forks" off other threads
9239 * to do that as needed.
9240 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9241 * "->recovery" and create a thread at ->sync_thread.
NeilBrowndfc70642008-05-23 13:04:39 -07009242 * When the thread finishes it sets MD_RECOVERY_DONE
Linus Torvalds1da177e2005-04-16 15:20:36 -07009243 * and wakeups up this thread which will reap the thread and finish up.
9244 * This thread also removes any faulty devices (with nr_pending == 0).
9245 *
9246 * The overall approach is:
9247 * 1/ if the superblock needs updating, update it.
9248 * 2/ If a recovery thread is running, don't do anything else.
9249 * 3/ If recovery has finished, clean up, possibly marking spares active.
9250 * 4/ If there are any faulty devices, remove them.
9251 * 5/ If array is degraded, try to add spares devices
9252 * 6/ If array has spares or is not in-sync, start a resync thread.
9253 */
NeilBrownfd01b882011-10-11 16:47:53 +11009254void md_check_recovery(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009255{
NeilBrown059421e2018-10-03 15:04:41 +10009256 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9257 /* Write superblock - thread that called mddev_suspend()
9258 * holds reconfig_mutex for us.
9259 */
9260 set_bit(MD_UPDATING_SB, &mddev->flags);
9261 smp_mb__after_atomic();
9262 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9263 md_update_sb(mddev, 0);
9264 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9265 wake_up(&mddev->sb_wait);
9266 }
9267
Jonathan Brassow68866e42011-06-08 15:10:08 +10009268 if (mddev->suspended)
9269 return;
9270
NeilBrown5f404022005-06-21 17:17:16 -07009271 if (mddev->bitmap)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009272 md_bitmap_daemon_work(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009273
NeilBrownfca4d842005-06-21 17:17:11 -07009274 if (signal_pending(current)) {
NeilBrown31a59e32008-04-30 00:52:30 -07009275 if (mddev->pers->sync_request && !mddev->external) {
NeilBrown9d487392016-11-02 14:16:49 +11009276 pr_debug("md: %s in immediate safe mode\n",
9277 mdname(mddev));
NeilBrownfca4d842005-06-21 17:17:11 -07009278 mddev->safemode = 2;
9279 }
9280 flush_signals(current);
9281 }
9282
NeilBrownc89a8ee2008-08-05 15:54:13 +10009283 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9284 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009285 if ( ! (
Shaohua Li29530792016-12-08 15:48:19 -08009286 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07009287 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
NeilBrownfca4d842005-06-21 17:17:11 -07009288 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
NeilBrown31a59e32008-04-30 00:52:30 -07009289 (mddev->external == 0 && mddev->safemode == 1) ||
NeilBrown4ad23a972017-03-15 14:05:14 +11009290 (mddev->safemode == 2
NeilBrownfca4d842005-06-21 17:17:11 -07009291 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009292 ))
9293 return;
NeilBrownfca4d842005-06-21 17:17:11 -07009294
NeilBrowndf5b89b2006-03-27 01:18:20 -08009295 if (mddev_trylock(mddev)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009296 int spares = 0;
NeilBrown480523f2019-08-20 10:21:09 +10009297 bool try_set_sync = mddev->safemode != 0;
NeilBrownfca4d842005-06-21 17:17:11 -07009298
Shaohua Liafc1f552017-08-11 20:34:45 -07009299 if (!mddev->external && mddev->safemode == 1)
NeilBrown33182d12017-08-08 16:56:36 +10009300 mddev->safemode = 0;
9301
NeilBrownc89a8ee2008-08-05 15:54:13 +10009302 if (mddev->ro) {
Neil Brownab16bfc2015-06-17 12:31:46 +10009303 struct md_rdev *rdev;
9304 if (!mddev->external && mddev->in_sync)
9305 /* 'Blocked' flag not needed as failed devices
9306			 * will be recorded if the array is switched to read/write.
9307 * Leaving it set will prevent the device
9308 * from being removed.
9309 */
9310 rdev_for_each(rdev, mddev)
9311 clear_bit(Blocked, &rdev->flags);
NeilBrown7ceb17e2013-04-24 11:42:42 +10009312 /* On a read-only array we can:
9313 * - remove failed devices
9314 * - add already-in_sync devices if the array itself
9315 * is in-sync.
9316 * As we only add devices that are already in-sync,
9317 * we can activate the spares immediately.
NeilBrownc89a8ee2008-08-05 15:54:13 +10009318 */
NeilBrown7ceb17e2013-04-24 11:42:42 +10009319 remove_and_add_spares(mddev, NULL);
NeilBrown8313b8e2013-12-12 10:13:33 +11009320 /* There is no thread, but we need to call
9321 * ->spare_active and clear saved_raid_disk
9322 */
NeilBrown2ac295a2014-05-29 11:40:03 +10009323 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009324 md_reap_sync_thread(mddev);
NeilBrowna4a3d262015-07-17 11:57:30 +10009325 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown8313b8e2013-12-12 10:13:33 +11009326 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
Shaohua Li29530792016-12-08 15:48:19 -08009327 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrownc89a8ee2008-08-05 15:54:13 +10009328 goto unlock;
9329 }
9330
Guoqing Jiang659b2542015-12-21 10:50:59 +11009331 if (mddev_is_clustered(mddev)) {
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009332 struct md_rdev *rdev, *tmp;
Guoqing Jiang659b2542015-12-21 10:50:59 +11009333			/* kick the device from the array if another
9334			 * node issued a disk-remove request.
9335 */
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009336 rdev_for_each_safe(rdev, tmp, mddev) {
Guoqing Jiang659b2542015-12-21 10:50:59 +11009337 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9338 rdev->raid_disk < 0)
9339 md_kick_rdev_from_array(rdev);
9340 }
9341 }
9342
NeilBrown480523f2019-08-20 10:21:09 +10009343 if (try_set_sync && !mddev->external && !mddev->in_sync) {
NeilBrown85572d72014-12-15 12:56:56 +11009344 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11009345 set_in_sync(mddev);
NeilBrown85572d72014-12-15 12:56:56 +11009346 spin_unlock(&mddev->lock);
NeilBrownfca4d842005-06-21 17:17:11 -07009347 }
NeilBrownfca4d842005-06-21 17:17:11 -07009348
Shaohua Li29530792016-12-08 15:48:19 -08009349 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07009350 md_update_sb(mddev, 0);
NeilBrown06d91a52005-06-21 17:17:12 -07009351
Linus Torvalds1da177e2005-04-16 15:20:36 -07009352 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9353 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9354 /* resync/recovery still happening */
9355 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9356 goto unlock;
9357 }
9358 if (mddev->sync_thread) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009359 md_reap_sync_thread(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009360 goto unlock;
9361 }
Neil Brown72a23c22008-06-28 08:31:41 +10009362 /* Set RUNNING before clearing NEEDED to avoid
9363 * any transients in the value of "sync_action".
9364 */
NeilBrown72f36d52012-10-11 14:25:57 +11009365 mddev->curr_resync_completed = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009366 spin_lock(&mddev->lock);
Neil Brown72a23c22008-06-28 08:31:41 +10009367 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrown23da4222014-12-15 12:57:01 +11009368 spin_unlock(&mddev->lock);
NeilBrown24dd4692005-11-08 21:39:26 -08009369 /* Clear some bits that don't mean anything, but
9370 * might be left set
9371 */
NeilBrown24dd4692005-11-08 21:39:26 -08009372 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9373 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009374
NeilBrowned209582012-04-24 10:23:14 +10009375 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9376 test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrownac05f252014-09-30 08:10:42 +10009377 goto not_running;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009378 /* no recovery is running.
9379 * remove any failed drives, then
9380 * add spares if possible.
NeilBrown72f36d52012-10-11 14:25:57 +11009381 * Spares are also removed and re-added, to allow
Linus Torvalds1da177e2005-04-16 15:20:36 -07009382 * the personality to fail the re-add.
9383 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009384
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009385 if (mddev->reshape_position != MaxSector) {
NeilBrown50ac1682009-06-18 08:47:55 +10009386 if (mddev->pers->check_reshape == NULL ||
9387 mddev->pers->check_reshape(mddev) != 0)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009388 /* Cannot proceed */
NeilBrownac05f252014-09-30 08:10:42 +10009389 goto not_running;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009390 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009391 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown746d3202013-04-24 11:42:41 +10009392 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
NeilBrown24dd4692005-11-08 21:39:26 -08009393 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9394 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Dan Williams56ac36d2008-08-07 10:02:47 -07009395 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009396 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009397 } else if (mddev->recovery_cp < MaxSector) {
9398 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10009399 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08009400 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9401 /* nothing to be done ... */
NeilBrownac05f252014-09-30 08:10:42 +10009402 goto not_running;
NeilBrown24dd4692005-11-08 21:39:26 -08009403
Linus Torvalds1da177e2005-04-16 15:20:36 -07009404 if (mddev->pers->sync_request) {
NeilBrownef99bf42012-05-22 13:55:08 +10009405 if (spares) {
NeilBrowna654b9d82005-06-21 17:17:27 -07009406 /* We are adding a device or devices to an array
9407 * which has the bitmap stored on all devices.
9408 * So make sure all bitmap pages get written
9409 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009410 md_bitmap_write_all(mddev->bitmap);
NeilBrowna654b9d82005-06-21 17:17:27 -07009411 }
NeilBrownac05f252014-09-30 08:10:42 +10009412 INIT_WORK(&mddev->del_work, md_start_sync);
9413 queue_work(md_misc_wq, &mddev->del_work);
9414 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009415 }
NeilBrownac05f252014-09-30 08:10:42 +10009416 not_running:
Neil Brown72a23c22008-06-28 08:31:41 +10009417 if (!mddev->sync_thread) {
9418 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009419 wake_up(&resync_wait);
Neil Brown72a23c22008-06-28 08:31:41 +10009420 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9421 &mddev->recovery))
NeilBrown0c3573f2009-01-09 08:31:05 +11009422 if (mddev->sysfs_action)
NeilBrown00bcb4a2010-06-01 19:37:23 +10009423 sysfs_notify_dirent_safe(mddev->sysfs_action);
Neil Brown72a23c22008-06-28 08:31:41 +10009424 }
NeilBrownac05f252014-09-30 08:10:42 +10009425 unlock:
9426 wake_up(&mddev->sb_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009427 mddev_unlock(mddev);
9428 }
9429}
NeilBrown6c144d32014-09-30 16:15:38 +10009430EXPORT_SYMBOL(md_check_recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009431
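/*
 * Illustrative sketch, not part of md.c: a personality's per-array
 * thread typically delegates all of the generic work described above
 * to md_check_recovery() at the top of its main loop, much like
 * raid1d()/raid5d() do.  The function name "myraid_thread" and the
 * trailing I/O handling are hypothetical.
 */
static void myraid_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;

	/* superblock updates, spare handling, forking of sync_thread */
	md_check_recovery(mddev);

	/* ... personality-specific request processing would follow ... */
}
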
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009432void md_reap_sync_thread(struct mddev *mddev)
9433{
9434 struct md_rdev *rdev;
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009435 sector_t old_dev_sectors = mddev->dev_sectors;
9436 bool is_reshaped = false;
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009437
9438 /* resync has finished, collect result */
9439 md_unregister_thread(&mddev->sync_thread);
9440 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
Guoqing Jiang0d8ed0e92019-07-24 11:09:21 +02009441 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9442 mddev->degraded != mddev->raid_disks) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009443 /* success...*/
9444 /* activate any spares */
9445 if (mddev->pers->spare_active(mddev)) {
Junxiao Bie1a86db2020-07-14 16:10:26 -07009446 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Shaohua Li29530792016-12-08 15:48:19 -08009447 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009448 }
9449 }
9450 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009451 mddev->pers->finish_reshape) {
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009452 mddev->pers->finish_reshape(mddev);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009453 if (mddev_is_clustered(mddev))
9454 is_reshaped = true;
9455 }
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009456
9457	/* If the array is no longer degraded, then any saved_raid_disk
NeilBrownf4667222013-12-09 12:04:56 +11009458 * information must be scrapped.
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009459 */
NeilBrownf4667222013-12-09 12:04:56 +11009460 if (!mddev->degraded)
9461 rdev_for_each(rdev, mddev)
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009462 rdev->saved_raid_disk = -1;
9463
9464 md_update_sb(mddev, 1);
Shaohua Li29530792016-12-08 15:48:19 -08009465 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009466 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9467 * clustered raid */
9468 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9469 md_cluster_ops->resync_finish(mddev);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009470 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownea358cd2015-06-12 20:05:04 +10009471 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009472 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9473 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9474 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9475 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
Guoqing Jiangaefb2e52018-10-18 16:37:44 +08009476 /*
9477 * We call md_cluster_ops->update_size here because sync_size could
9478 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9479	 * so it is time to update the size across the cluster.
9480 */
9481 if (mddev_is_clustered(mddev) && is_reshaped
9482 && !test_bit(MD_CLOSING, &mddev->flags))
9483 md_cluster_ops->update_size(mddev, old_dev_sectors);
NeilBrownf851b602014-12-11 10:02:10 +11009484 wake_up(&resync_wait);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009485 /* flag recovery needed just to double check */
9486 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9487 sysfs_notify_dirent_safe(mddev->sysfs_action);
Guoqing Jiang54679482021-10-04 23:34:53 +08009488 md_new_event();
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009489 if (mddev->event_work.func)
9490 queue_work(md_misc_wq, &mddev->event_work);
9491}
NeilBrown6c144d32014-09-30 16:15:38 +10009492EXPORT_SYMBOL(md_reap_sync_thread);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10009493
NeilBrownfd01b882011-10-11 16:47:53 +11009494void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
Dan Williams6bfe0b42008-04-30 00:52:32 -07009495{
NeilBrown00bcb4a2010-06-01 19:37:23 +10009496 sysfs_notify_dirent_safe(rdev->sysfs_state);
Dan Williams6bfe0b42008-04-30 00:52:32 -07009497 wait_event_timeout(rdev->blocked_wait,
NeilBrownde393cd2011-07-28 11:31:48 +10009498 !test_bit(Blocked, &rdev->flags) &&
9499 !test_bit(BlockedBadBlocks, &rdev->flags),
Dan Williams6bfe0b42008-04-30 00:52:32 -07009500 msecs_to_jiffies(5000));
9501 rdev_dec_pending(rdev, mddev);
9502}
9503EXPORT_SYMBOL(md_wait_for_blocked_rdev);
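
/*
 * Illustrative sketch, not part of md.c: callers grab a reference on
 * nr_pending before waiting, because md_wait_for_blocked_rdev() drops
 * that reference via rdev_dec_pending() when the wait finishes.  The
 * helper name "wait_for_unblock" is hypothetical; raid1/raid5 follow
 * a similar pattern before retrying writes.
 */
static void wait_for_unblock(struct mddev *mddev, struct md_rdev *rdev)
{
	if (test_bit(Blocked, &rdev->flags)) {
		atomic_inc(&rdev->nr_pending);
		md_wait_for_blocked_rdev(rdev, mddev);
	}
}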
9504
NeilBrownc6563a82012-05-21 09:27:00 +10009505void md_finish_reshape(struct mddev *mddev)
9506{
9507	/* called by the personality module when a reshape completes. */
9508 struct md_rdev *rdev;
9509
9510 rdev_for_each(rdev, mddev) {
9511 if (rdev->data_offset > rdev->new_data_offset)
9512 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9513 else
9514 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9515 rdev->data_offset = rdev->new_data_offset;
9516 }
9517}
9518EXPORT_SYMBOL(md_finish_reshape);
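
/*
 * Illustrative sketch, not part of md.c: a personality's
 * ->finish_reshape hook is the typical caller, folding the new data
 * offsets back in once the reshape has completed.  "myraid" is a
 * hypothetical personality; real hooks do extra bookkeeping first.
 */
static void myraid_finish_reshape(struct mddev *mddev)
{
	/* fold new_data_offset back into data_offset on every rdev */
	md_finish_reshape(mddev);
}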
NeilBrown2230dfe2011-07-28 11:31:46 +10009519
Vishal Vermafc974ee2015-12-24 19:20:34 -07009520/* Bad block management */
NeilBrown2230dfe2011-07-28 11:31:46 +10009521
Vishal Vermafc974ee2015-12-24 19:20:34 -07009522/* Returns 1 on success, 0 on failure */
NeilBrown3cb03002011-10-11 16:45:26 +11009523int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
NeilBrownc6563a82012-05-21 09:27:00 +10009524 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009525{
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04009526 struct mddev *mddev = rdev->mddev;
NeilBrownc6563a82012-05-21 09:27:00 +10009527 int rv;
9528 if (is_new)
9529 s += rdev->new_data_offset;
9530 else
9531 s += rdev->data_offset;
Vishal Vermafc974ee2015-12-24 19:20:34 -07009532 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9533 if (rv == 0) {
NeilBrown2230dfe2011-07-28 11:31:46 +10009534 /* Make sure they get written out promptly */
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009535 if (test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009536 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
NeilBrown8bd2f0a2011-12-08 16:26:08 +11009537 sysfs_notify_dirent_safe(rdev->sysfs_state);
Shaohua Li29530792016-12-08 15:48:19 -08009538 set_mask_bits(&mddev->sb_flags, 0,
9539 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
NeilBrown2230dfe2011-07-28 11:31:46 +10009540 md_wakeup_thread(rdev->mddev->thread);
Vishal Vermafc974ee2015-12-24 19:20:34 -07009541 return 1;
9542 } else
9543 return 0;
NeilBrown2230dfe2011-07-28 11:31:46 +10009544}
9545EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9546
NeilBrownc6563a82012-05-21 09:27:00 +10009547int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9548 int is_new)
NeilBrown2230dfe2011-07-28 11:31:46 +10009549{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009550 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10009551 if (is_new)
9552 s += rdev->new_data_offset;
9553 else
9554 s += rdev->data_offset;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009555 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9556 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009557 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02009558 return rv;
NeilBrown2230dfe2011-07-28 11:31:46 +10009559}
9560EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
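
/*
 * Illustrative sketch, not part of md.c: on a write error a
 * personality may record the failed range instead of failing the
 * whole device, and only call md_error() when the bad-block table
 * cannot absorb it.  "note_write_error" is a hypothetical helper;
 * raid1/raid10 implement this pattern with more bookkeeping.
 */
static void note_write_error(struct md_rdev *rdev, sector_t s, int sectors)
{
	if (!rdev_set_badblocks(rdev, s, sectors, 0))
		md_error(rdev->mddev, rdev);
}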
9561
Adrian Bunk75c96f82005-05-05 16:16:09 -07009562static int md_notify_reboot(struct notifier_block *this,
9563 unsigned long code, void *x)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009564{
9565 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11009566 struct mddev *mddev;
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009567 int need_delay = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009568
NeilBrownc744a652012-03-19 12:46:37 +11009569 for_each_mddev(mddev, tmp) {
9570 if (mddev_trylock(mddev)) {
NeilBrown30b8aa92012-04-24 10:23:16 +10009571 if (mddev->pers)
9572 __md_stop_writes(mddev);
NeilBrown0f62fb22014-05-06 09:36:08 +10009573 if (mddev->persistent)
9574 mddev->safemode = 2;
NeilBrownc744a652012-03-19 12:46:37 +11009575 mddev_unlock(mddev);
Daniel P. Berrange2dba6a92011-09-23 10:40:45 +01009576 }
NeilBrownc744a652012-03-19 12:46:37 +11009577 need_delay = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009578 }
NeilBrownc744a652012-03-19 12:46:37 +11009579 /*
9580 * certain more exotic SCSI devices are known to be
9581	 * certain more exotic SCSI devices are known to be
9582	 * volatile with respect to too-early system reboots. While the
9583	 * right place to handle this issue is the individual
9584	 * driver, we do want the RAID driver itself to be safe ...
9584 */
9585 if (need_delay)
9586 mdelay(1000*1);
9587
Linus Torvalds1da177e2005-04-16 15:20:36 -07009588 return NOTIFY_DONE;
9589}
9590
Adrian Bunk75c96f82005-05-05 16:16:09 -07009591static struct notifier_block md_notifier = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009592 .notifier_call = md_notify_reboot,
9593 .next = NULL,
9594 .priority = INT_MAX, /* before any real devices */
9595};
9596
9597static void md_geninit(void)
9598{
NeilBrown36a4e1f2011-10-07 14:23:17 +11009599 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07009600
Alexey Dobriyan97a32532020-02-03 17:37:17 -08009601 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009602}
9603
Adrian Bunk75c96f82005-05-05 16:16:09 -07009604static int __init md_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009605{
Tejun Heoe804ac72010-10-15 15:36:08 +02009606 int ret = -ENOMEM;
9607
Tejun Heoada609e2011-01-25 14:35:54 +01009608 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
Tejun Heoe804ac72010-10-15 15:36:08 +02009609 if (!md_wq)
9610 goto err_wq;
9611
9612 md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9613 if (!md_misc_wq)
9614 goto err_misc_wq;
9615
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009616 md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
Guoqing Jiangcf0b9b42020-10-08 05:19:09 +02009617 if (!md_rdev_misc_wq)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009618 goto err_rdev_misc_wq;
9619
Christoph Hellwig28144f92020-10-29 15:58:34 +01009620 ret = __register_blkdev(MD_MAJOR, "md", md_probe);
9621 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009622 goto err_md;
9623
Christoph Hellwig28144f92020-10-29 15:58:34 +01009624 ret = __register_blkdev(0, "mdp", md_probe);
9625 if (ret < 0)
Tejun Heoe804ac72010-10-15 15:36:08 +02009626 goto err_mdp;
9627 mdp_major = ret;
9628
Linus Torvalds1da177e2005-04-16 15:20:36 -07009629 register_reboot_notifier(&md_notifier);
Eric W. Biederman0b4d4142007-02-14 00:34:09 -08009630 raid_table_header = register_sysctl_table(raid_root_table);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009631
9632 md_geninit();
NeilBrownd710e132008-10-13 11:55:12 +11009633 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009634
Tejun Heoe804ac72010-10-15 15:36:08 +02009635err_mdp:
9636 unregister_blkdev(MD_MAJOR, "md");
9637err_md:
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009638 destroy_workqueue(md_rdev_misc_wq);
9639err_rdev_misc_wq:
Tejun Heoe804ac72010-10-15 15:36:08 +02009640 destroy_workqueue(md_misc_wq);
9641err_misc_wq:
9642 destroy_workqueue(md_wq);
9643err_wq:
9644 return ret;
9645}
Linus Torvalds1da177e2005-04-16 15:20:36 -07009646
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009647static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009648{
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009649 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009650 struct md_rdev *rdev2, *tmp;
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009651 int role, ret;
9652 char b[BDEVNAME_SIZE];
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009653
Guoqing Jiang818da592017-03-01 16:42:40 +08009654 /*
9655	 * If the size was changed on another node, then we need
9656	 * to resize here as well.
9657 */
9658 if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9659 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9660 if (ret)
9661 pr_info("md-cluster: resize failed\n");
9662 else
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009663 md_bitmap_update_sb(mddev->bitmap);
Guoqing Jiang818da592017-03-01 16:42:40 +08009664 }
9665
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009666 /* Check for change of roles in the active devices */
Heming Zhaof7c7a2f2021-04-08 15:44:15 +08009667 rdev_for_each_safe(rdev2, tmp, mddev) {
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009668 if (test_bit(Faulty, &rdev2->flags))
9669 continue;
9670
9671 /* Check if the roles changed */
9672 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009673
9674 if (test_bit(Candidate, &rdev2->flags)) {
9675 if (role == 0xfffe) {
9676 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
9677 md_kick_rdev_from_array(rdev2);
9678 continue;
9679 }
9680 else
9681 clear_bit(Candidate, &rdev2->flags);
9682 }
9683
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009684 if (role != rdev2->raid_disk) {
Guoqing Jiangca1e98e2018-10-18 16:37:45 +08009685 /*
9686			 * The device got activated on another node; do the same here unless a reshape is happening.
9687 */
9688 if (rdev2->raid_disk == -1 && role != 0xffff &&
9689 !(le32_to_cpu(sb->feature_map) &
9690 MD_FEATURE_RESHAPE_ACTIVE)) {
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009691 rdev2->saved_raid_disk = role;
9692 ret = remove_and_add_spares(mddev, rdev2);
9693 pr_info("Activated spare: %s\n",
NeilBrown9d487392016-11-02 14:16:49 +11009694 bdevname(rdev2->bdev,b));
Guoqing Jianga5781832016-05-02 11:33:14 -04009695				/* wake up mddev->thread here, so the array can
9696				 * perform a resync with the newly activated disk */
9697 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9698 md_wakeup_thread(mddev->thread);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009699 }
9700			/* The device is faulty.
9701			 * We just want to do the minimum to mark the disk
9702			 * as faulty. The recovery is performed by the
9703			 * node that initiated the error.
9704 */
9705 if ((role == 0xfffe) || (role == 0xfffd)) {
9706 md_error(mddev, rdev2);
9707 clear_bit(Blocked, &rdev2->flags);
9708 }
9709 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009710 }
9711
Zhao Heminga8da01f2020-11-19 19:41:33 +08009712 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9713 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9714 if (ret)
9715 pr_warn("md: updating array disks failed. %d\n", ret);
9716 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009717
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009718 /*
9719	 * Since mddev->delta_disks has already been updated in update_raid_disks,
9720	 * it is time to check for a reshape.
9721 */
9722 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9723 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9724 /*
9725		 * reshape is happening on the remote node, so we need to
9726 * update reshape_position and call start_reshape.
9727 */
Christoph Hellwiged4d0a4e2019-04-04 18:56:10 +02009728 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
Guoqing Jiang7564bed2018-10-18 16:37:42 +08009729 if (mddev->pers->update_reshape_pos)
9730 mddev->pers->update_reshape_pos(mddev);
9731 if (mddev->pers->start_reshape)
9732 mddev->pers->start_reshape(mddev);
9733 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9734 mddev->reshape_position != MaxSector &&
9735 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9736		/* reshape has just finished on another node. */
9737 mddev->reshape_position = MaxSector;
9738 if (mddev->pers->update_reshape_pos)
9739 mddev->pers->update_reshape_pos(mddev);
9740 }
9741
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009742 /* Finally set the event to be up to date */
9743 mddev->events = le64_to_cpu(sb->events);
9744}
9745
9746static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9747{
9748 int err;
9749 struct page *swapout = rdev->sb_page;
9750 struct mdp_superblock_1 *sb;
9751
9752	/* Stash the rdev's sb page in the 'swapout' temporary
9753	 * variable so it can be restored if a later step fails.
9754 */
9755 rdev->sb_page = NULL;
NeilBrown7f0f0d82016-11-02 14:16:49 +11009756 err = alloc_disk_sb(rdev);
9757 if (err == 0) {
9758 ClearPageUptodate(rdev->sb_page);
9759 rdev->sb_loaded = 0;
9760 err = super_types[mddev->major_version].
9761 load_super(rdev, NULL, mddev->minor_version);
9762 }
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009763 if (err < 0) {
9764 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9765 __func__, __LINE__, rdev->desc_nr, err);
NeilBrown7f0f0d82016-11-02 14:16:49 +11009766 if (rdev->sb_page)
9767 put_page(rdev->sb_page);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009768 rdev->sb_page = swapout;
9769 rdev->sb_loaded = 1;
9770 return err;
9771 }
9772
9773 sb = page_address(rdev->sb_page);
9774	/* Only pick up the recovery offset if MD_FEATURE_RECOVERY_OFFSET
9775	 * is set in the feature map.
9776 */
9777
9778 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9779 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9780
9781	/* The other node finished recovery; call spare_active to mark
9782	 * the device In_sync and update mddev->degraded.
9783 */
9784 if (rdev->recovery_offset == MaxSector &&
9785 !test_bit(In_sync, &rdev->flags) &&
9786 mddev->pers->spare_active(mddev))
Junxiao Bie1a86db2020-07-14 16:10:26 -07009787 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
Goldwyn Rodrigues70bcecd2015-08-21 10:33:39 -05009788
9789 put_page(swapout);
9790 return 0;
9791}
9792
9793void md_reload_sb(struct mddev *mddev, int nr)
9794{
9795 struct md_rdev *rdev;
9796 int err;
9797
9798 /* Find the rdev */
9799 rdev_for_each_rcu(rdev, mddev) {
9800 if (rdev->desc_nr == nr)
9801 break;
9802 }
9803
9804 if (!rdev || rdev->desc_nr != nr) {
9805 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9806 return;
9807 }
9808
9809 err = read_rdev(mddev, rdev);
9810 if (err < 0)
9811 return;
9812
9813 check_sb_changes(mddev, rdev);
9814
9815 /* Read all rdev's to update recovery_offset */
Guoqing Jiang0ea99242018-04-09 17:01:21 +08009816 rdev_for_each_rcu(rdev, mddev) {
9817 if (!test_bit(Faulty, &rdev->flags))
9818 read_rdev(mddev, rdev);
9819 }
Goldwyn Rodrigues1d7e3e92014-06-07 01:53:00 -05009820}
9821EXPORT_SYMBOL(md_reload_sb);
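
/*
 * Illustrative sketch, not part of md.c: md-cluster's message handler
 * is the expected caller, refreshing local metadata when another node
 * broadcasts a metadata update.  The function name and the raid_slot
 * parameter are stand-ins, not the exact md-cluster.c signature.
 */
static void handle_metadata_updated(struct mddev *mddev, int raid_slot)
{
	md_reload_sb(mddev, raid_slot);
}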
9822
Linus Torvalds1da177e2005-04-16 15:20:36 -07009823#ifndef MODULE
9824
9825/*
9826 * Searches all registered partitions for autorun RAID arrays
9827 * at boot time.
9828 */
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009829
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009830static DEFINE_MUTEX(detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009831static LIST_HEAD(all_detected_devices);
9832struct detected_devices_node {
9833 struct list_head list;
9834 dev_t dev;
9835};
Linus Torvalds1da177e2005-04-16 15:20:36 -07009836
9837void md_autodetect_dev(dev_t dev)
9838{
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009839 struct detected_devices_node *node_detected_dev;
9840
9841 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9842 if (node_detected_dev) {
9843 node_detected_dev->dev = dev;
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009844 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009845 list_add_tail(&node_detected_dev->list, &all_detected_devices);
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009846 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009847 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009848}
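
/*
 * Illustrative sketch, not part of md.c: the partition-scanning code
 * hands Linux-raid-autodetect partitions (type 0xfd) to md through
 * md_autodetect_dev().  The dev_t below is a made-up example, not a
 * real call site.
 */
static void example_autodetect(void)
{
	md_autodetect_dev(MKDEV(8, 1));	/* e.g. /dev/sda1 */
}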
9849
Christoph Hellwigd82fa812020-06-06 15:00:24 +02009850void md_autostart_arrays(int part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009851{
NeilBrown3cb03002011-10-11 16:45:26 +11009852 struct md_rdev *rdev;
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009853 struct detected_devices_node *node_detected_dev;
9854 dev_t dev;
9855 int i_scanned, i_passed;
9856
9857 i_scanned = 0;
9858 i_passed = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009859
NeilBrown9d487392016-11-02 14:16:49 +11009860 pr_info("md: Autodetecting RAID arrays.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009861
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009862 mutex_lock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009863 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9864 i_scanned++;
9865 node_detected_dev = list_entry(all_detected_devices.next,
9866 struct detected_devices_node, list);
9867 list_del(&node_detected_dev->list);
9868 dev = node_detected_dev->dev;
9869 kfree(node_detected_dev);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009870 mutex_unlock(&detected_devices_mutex);
NeilBrowndf968c42007-07-17 04:06:11 -07009871 rdev = md_import_device(dev,0, 90);
Shaohua Li90bcf1332016-09-14 14:26:54 -07009872 mutex_lock(&detected_devices_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009873 if (IS_ERR(rdev))
9874 continue;
9875
NeilBrown403df472014-09-30 15:52:29 +10009876 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07009877 continue;
NeilBrown403df472014-09-30 15:52:29 +10009878
NeilBrownd0fae182008-03-04 14:29:31 -08009879 set_bit(AutoDetected, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009880 list_add(&rdev->same_set, &pending_raid_disks);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009881 i_passed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009882 }
Cong Wang5b1f5bc32016-06-08 09:20:16 -07009883 mutex_unlock(&detected_devices_mutex);
Michael J. Evans4d936ec2007-10-16 23:30:52 -07009884
NeilBrown9d487392016-11-02 14:16:49 +11009885 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009886
9887 autorun_devices(part);
9888}
9889
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08009890#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009891
9892static __exit void md_exit(void)
9893{
NeilBrownfd01b882011-10-11 16:47:53 +11009894 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009895 struct list_head *tmp;
NeilBrowne2f23b62014-04-09 14:33:51 +10009896 int delay = 1;
Greg Kroah-Hartman8ab5e4c2005-06-20 21:15:16 -07009897
Christoph Hellwig3dbd8c22009-03-31 14:27:02 +11009898 unregister_blkdev(MD_MAJOR,"md");
Linus Torvalds1da177e2005-04-16 15:20:36 -07009899 unregister_blkdev(mdp_major, "mdp");
9900 unregister_reboot_notifier(&md_notifier);
9901 unregister_sysctl_table(raid_table_header);
NeilBrowne2f23b62014-04-09 14:33:51 +10009902
9903 /* We cannot unload the modules while some process is
9904 * waiting for us in select() or poll() - wake them up
9905 */
9906 md_unloading = 1;
9907 while (waitqueue_active(&md_event_waiters)) {
9908 /* not safe to leave yet */
9909 wake_up(&md_event_waiters);
9910 msleep(delay);
9911 delay += delay;
9912 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009913 remove_proc_entry("mdstat", NULL);
NeilBrowne2f23b62014-04-09 14:33:51 +10009914
NeilBrown29ac4aa2008-02-06 01:39:58 -08009915 for_each_mddev(mddev, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009916 export_array(mddev);
NeilBrown93568632017-02-06 13:41:39 +11009917 mddev->ctime = 0;
NeilBrownd3374822009-01-09 08:31:10 +11009918 mddev->hold_active = 0;
NeilBrown93568632017-02-06 13:41:39 +11009919 /*
9920 * for_each_mddev() will call mddev_put() at the end of each
9921 * iteration. As the mddev is now fully clear, this will
9922 * schedule the mddev for destruction by a workqueue, and the
9923 * destroy_workqueue() below will wait for that to complete.
9924 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07009925 }
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02009926 destroy_workqueue(md_rdev_misc_wq);
Tejun Heoe804ac72010-10-15 15:36:08 +02009927 destroy_workqueue(md_misc_wq);
9928 destroy_workqueue(md_wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009929}
9930
Dan Williams685784a2007-07-09 11:56:42 -07009931subsys_initcall(md_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009932module_exit(md_exit)
9933
Kees Cooke4dca7b2017-10-17 19:04:42 -07009934static int get_ro(char *buffer, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009935{
Xiongfeng Wang3f999802020-05-11 16:23:25 +08009936 return sprintf(buffer, "%d\n", start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009937}
Kees Cooke4dca7b2017-10-17 19:04:42 -07009938static int set_ro(const char *val, const struct kernel_param *kp)
NeilBrownf91de922005-11-08 21:39:36 -08009939{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03009940 return kstrtouint(val, 10, (unsigned int *)&start_readonly);
NeilBrownf91de922005-11-08 21:39:36 -08009941}
9942
NeilBrown80ca3a42006-07-10 04:44:18 -07009943module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9944module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
NeilBrownefeb53c2009-01-09 08:31:10 +11009945module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
NeilBrown78b63502017-04-12 16:26:13 +10009946module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
NeilBrownf91de922005-11-08 21:39:36 -08009947
Linus Torvalds1da177e2005-04-16 15:20:36 -07009948MODULE_LICENSE("GPL");
NeilBrown0efb9e62009-12-14 12:49:58 +11009949MODULE_DESCRIPTION("MD RAID framework");
NeilBrownaa1595e2005-08-04 12:53:32 -07009950MODULE_ALIAS("md");
NeilBrown72008652005-08-26 18:34:15 -07009951MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);