// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static struct kobj_type md_ktype;

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
static struct workqueue_struct *md_rdev_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
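
/*
 * Worked example (editorial addition, not from the original source):
 * with the halving rule above, an rdev that had accumulated 8 corrected
 * read errors and then hits its next error 3 hours later is charged with
 * only 8 / 2^3 = 1 prior error, so devices with occasional, widely-spaced
 * errors never approach the 20-error ejection threshold.
 */
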
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */
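
/*
 * Illustrative example (editorial addition): to guarantee at least
 * 50 MB/s of resync throughput on md0, one might run, as root:
 *
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 *
 * A per-array sysfs value, once set, overrides the system-wide sysctl
 * default (see speed_min()/speed_max() below).
 */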

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if the pool does not exist */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}

/*
 * rdev needs to enable the serialization machinery if it meets these
 * conditions:
 * 1. it is a multi-queue device flagged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resource for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which returns true from rdev_need_serial.
 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			      bool is_suspend)
{
	int ret = 0;

	if (rdev && !rdev_need_serial(rdev) &&
	    !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (!is_suspend)
		mddev_suspend(mddev);

	if (!rdev)
		ret = rdevs_init_serial(mddev);
	else
		ret = rdev_init_serial(rdev);
	if (ret)
		goto abort;

	if (mddev->serial_info_pool == NULL) {
		/*
		 * already in memalloc noio context by
		 * mddev_suspend()
		 */
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		if (!mddev->serial_info_pool) {
			rdevs_uninit_serial(mddev);
			pr_err("can't alloc memory pool for serialization\n");
		}
	}

abort:
	if (!is_suspend)
		mddev_resume(mddev);
}

/*
 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flagged with CollisionCheck.
 * 2. when the bitmap is destroyed while the policy is not enabled.
 * 3. for the disable policy, the pool is destroyed only when no rdev needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
			       bool is_suspend)
{
	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0; /* used to track if other rdevs need the pool */

		if (!is_suspend)
			mddev_suspend(mddev);
		rdev_for_each(temp, mddev) {
			if (!rdev) {
				if (!mddev->serialize_policy ||
				    !rdev_need_serial(temp))
					rdev_uninit_serial(temp);
				else
					num++;
			} else if (temp != rdev &&
				   test_bit(CollisionCheck, &temp->flags))
				num++;
		}

		if (rdev)
			rdev_uninit_serial(rdev);

		if (num)
			pr_info("The mempool could be used by other devices\n");
		else {
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
		}
		if (!is_suspend)
			mddev_resume(mddev);
	}
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it. This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

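/*
 * Illustrative example (editorial addition): with the preferred
 * mechanism, user space asks the driver to create an array up front
 * instead of racing on a device node, e.g.
 *
 *	echo md_test > /sys/module/md_mod/parameters/new_array
 *
 * (treat this as a sketch; the exact set of accepted names is decided
 * by the driver's array-creation path, not documented here)
 */
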
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->bio_set))
		return bio_alloc(gfp_mask, nr_iovecs);

	return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
	if (!mddev || !bioset_initialized(&mddev->sync_set))
		return bio_alloc(GFP_NOIO, 1);

	return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set);
}

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
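
/*
 * Illustrative sketch (editorial addition): a typical caller walks every
 * array like this; the macro does the lock/refcount dance, and the rule
 * above applies if the body breaks out of the loop early:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_debug("%s\n", mdname(mddev));
 */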

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

struct md_io {
	struct mddev *mddev;
	bio_end_io_t *orig_bi_end_io;
	void *orig_bi_private;
	struct block_device *orig_bi_bdev;
	unsigned long start_time;
};

static void md_end_io(struct bio *bio)
{
	struct md_io *md_io = bio->bi_private;
	struct mddev *mddev = md_io->mddev;

	bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev);

	bio->bi_end_io = md_io->orig_bi_end_io;
	bio->bi_private = md_io->orig_bi_private;

	mempool_free(md_io, &mddev->md_io_pool);

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}

static blk_qc_t md_submit_bio(struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	blk_queue_split(&bio);

	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (bio->bi_end_io != md_end_io) {
		struct md_io *md_io;

		md_io = mempool_alloc(&mddev->md_io_pool, GFP_NOIO);
		md_io->mddev = mddev;
		md_io->orig_bi_end_io = bio->bi_end_io;
		md_io->orig_bi_private = bio->bi_private;
		md_io->orig_bi_bdev = bio->bi_bdev;

		bio->bi_end_io = md_end_io;
		bio->bi_private = md_io;

		md_io->start_time = bio_start_io_acct(bio);
	}

	/* bio could be mergeable after passing to the underlying layer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	/* we entered the memalloc scope from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

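/*
 * Illustrative sketch (editorial addition): callers pair the two around
 * a reconfiguration, with mddev->reconfig_mutex held throughout:
 *
 *	mddev_suspend(mddev);
 *	... reconfigure the array ...
 *	mddev_resume(mddev);
 */
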
/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	mddev->start_flush = ktime_get_boottime();
	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * must reset flush_bio before calling into md_handle_request to avoid a
	 * deadlock, because other bios passed md_handle_request suspend check
	 * could wait for this and below md_handle_request could wait for those
	 * bios because of suspend check
	 */
	spin_lock_irq(&mddev->lock);
	mddev->prev_flush_start = mddev->start_flush;
	mddev->flush_bio = NULL;
	spin_unlock_irq(&mddev->lock);
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0) {
		/* an empty barrier - all done */
		bio_endio(bio);
	} else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}

/*
 * Manages consolidation of flushes and submitting any flushes needed for
 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
 * being finished in another context. Returns false if the flushing is
 * complete but still needs the I/O portion of the bio to be processed.
 */
bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	ktime_t req_start = ktime_get_boottime();
	spin_lock_irq(&mddev->lock);
	/* flush requests wait until ongoing flush completes,
	 * hence coalescing all the pending requests.
	 */
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio ||
			    ktime_before(req_start, mddev->prev_flush_start),
			    mddev->lock);
	/* new request after previous flush is completed */
	if (ktime_after(req_start, mddev->prev_flush_start)) {
		WARN_ON(mddev->flush_bio);
		mddev->flush_bio = bio;
		bio = NULL;
	}
	spin_unlock_irq(&mddev->lock);

	if (!bio) {
		INIT_WORK(&mddev->flush_work, submit_flushes);
		queue_work(md_wq, &mddev->flush_work);
	} else {
		/* flush was performed for some other bio while we waited. */
		if (bio->bi_iter.bi_size == 0)
			/* an empty barrier - all done */
			bio_endio(bio);
		else {
			bio->bi_opf &= ~REQ_PREFLUSH;
			return false;
		}
	}
	return true;
}
EXPORT_SYMBOL(md_flush_request);
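
/*
 * Illustrative sketch (editorial addition; faux_make_request is
 * hypothetical): a personality's ->make_request() typically begins with
 * this pattern, matching the return convention documented above:
 *
 *	static bool faux_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (unlikely(bio->bi_opf & REQ_PREFLUSH)
 *		    && md_flush_request(mddev, bio))
 *			return true;	(flush fully handled elsewhere)
 *		... handle the data portion of the bio ...
 *		return true;
 *	}
 */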

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);

		/*
		 * Call queue_work inside the spinlock so that
		 * flush_workqueue() after mddev_find will succeed in waiting
		 * for the work to be done.
		 */
		INIT_WORK(&mddev->del_work, mddev_delayed_delete);
		queue_work(md_misc_wq, &mddev->del_work);
	}
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);

void mddev_init(struct mddev *mddev)
{
	kobject_init(&mddev->kobj, &md_ktype);
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = "none";
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev *mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So keep sysfs_active set while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);

static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: %s gets error=%d\n", __func__,
		       blk_status_to_errno(bio->bi_status));
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (!page)
		return;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = md_bio_alloc_sync(mddev);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
	int ret;

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(bio, rdev->meta_bdev);
	else
		bio_set_dev(bio, rdev->bdev);
	bio_set_op_attrs(bio, op, op_flags);
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(bio);

	ret = !bio->bi_status;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}

static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures. It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences). However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001151/*
1152 * Handle superblock details.
1153 * We want to be able to handle multiple superblock formats
1154 * so we have a common interface to them all, and an array of
1155 * different handlers.
1156 * We rely on user-space to write the initial superblock, and support
1157 * reading and updating of superblocks.
1158 * Interface methods are:
NeilBrown3cb03002011-10-11 16:45:26 +11001159 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 * loads and validates a superblock on dev.
1161 * if refdev != NULL, compare superblocks on both devices
1162 * Return:
1163 * 0 - dev has a superblock that is compatible with refdev
1164 * 1 - dev has a superblock that is compatible and newer than refdev
1165 * so dev should be used as the refdev in future
1166 * -EINVAL superblock incompatible or invalid
1167	 *    -other error, e.g. -EIO
1168 *
NeilBrownfd01b882011-10-11 16:47:53 +11001169 * int validate_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 * Verify that dev is acceptable into mddev.
1171 * The first time, mddev->raid_disks will be 0, and data from
1172 * dev should be merged in. Subsequent calls check that dev
1173 * is new enough. Return 0 or -EINVAL
1174 *
NeilBrownfd01b882011-10-11 16:47:53 +11001175 * void sync_super(struct mddev *mddev, struct md_rdev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 * Update the superblock for rdev with data in mddev
1177 * This does not write to disc.
1178 *
1179 */
1180
1181struct super_type {
Chris Webb0cd17fe2008-06-28 08:31:46 +10001182 char *name;
1183 struct module *owner;
NeilBrownc6563a82012-05-21 09:27:00 +10001184 int (*load_super)(struct md_rdev *rdev,
1185 struct md_rdev *refdev,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001186 int minor_version);
NeilBrownc6563a82012-05-21 09:27:00 +10001187 int (*validate_super)(struct mddev *mddev,
1188 struct md_rdev *rdev);
1189 void (*sync_super)(struct mddev *mddev,
1190 struct md_rdev *rdev);
NeilBrown3cb03002011-10-11 16:45:26 +11001191 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
Andre Noll15f4a5f2008-07-21 14:42:12 +10001192 sector_t num_sectors);
NeilBrownc6563a82012-05-21 09:27:00 +10001193 int (*allow_new_offset)(struct md_rdev *rdev,
1194 unsigned long long new_offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195};
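/*
 * Sketch of how these handlers are used (the exact call sites live
 * elsewhere in this file and in md_import_device(); treat the
 * load_super call below as an assumption about that caller, not a
 * quote):
 *
 *	err = super_types[mddev->major_version].load_super(rdev, refdev,
 *							   mddev->minor_version);
 *	...
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 *
 * i.e. everything is dispatched through the table, keyed by the
 * array's metadata version, so a new format only needs a new
 * super_type entry.
 */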
1196
1197/*
Andre Noll0894cc32009-06-18 08:49:23 +10001198 * Check that the given mddev has no bitmap.
1199 *
1200 * This function is called from the run method of all personalities that do not
1201 * support bitmaps. It prints an error message and returns non-zero if mddev
1202 * has a bitmap. Otherwise, it returns 0.
1203 *
1204 */
NeilBrownfd01b882011-10-11 16:47:53 +11001205int md_check_no_bitmap(struct mddev *mddev)
Andre Noll0894cc32009-06-18 08:49:23 +10001206{
NeilBrownc3d97142009-12-14 12:49:52 +11001207 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
Andre Noll0894cc32009-06-18 08:49:23 +10001208 return 0;
NeilBrown9d487392016-11-02 14:16:49 +11001209 pr_warn("%s: bitmaps are not supported for %s\n",
Andre Noll0894cc32009-06-18 08:49:23 +10001210 mdname(mddev), mddev->pers->name);
1211 return 1;
1212}
1213EXPORT_SYMBOL(md_check_no_bitmap);
1214
1215/*
NeilBrownf72ffdd2014-09-30 14:23:59 +10001216 * load_super for 0.90.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 */
NeilBrown3cb03002011-10-11 16:45:26 +11001218static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219{
1220 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1221 mdp_super_t *sb;
1222 int ret;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001223 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224
1225 /*
Andre Noll0f420352008-07-11 22:02:23 +10001226	 * Calculate the position of the superblock (in 512-byte sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 * it's at the end of the disk.
1228 *
1229 * It also happens to be a multiple of 4Kb.
1230 */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001231 rdev->sb_start = calc_dev_sboffset(rdev);
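	/*
	 * Sketch of the placement math, assuming calc_dev_sboffset()
	 * keeps the historic 0.90 layout (the last 64K-aligned 64K
	 * chunk of the device).  For a 1000000-sector device:
	 *
	 *	(1000000 & ~(128 - 1)) - 128 = 999936 - 128 = 999808
	 *
	 * so the superblock starts 64K-aligned, 64K below the aligned
	 * end of the device.
	 */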
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232
NeilBrown0002b272005-09-09 16:23:53 -07001233 ret = read_disk_sb(rdev, MD_SB_BYTES);
NeilBrown9d487392016-11-02 14:16:49 +11001234 if (ret)
1235 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236
1237 ret = -EINVAL;
1238
1239 bdevname(rdev->bdev, b);
Namhyung Kim65a06f062011-07-27 11:00:36 +10001240 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
1242 if (sb->md_magic != MD_SB_MAGIC) {
NeilBrown9d487392016-11-02 14:16:49 +11001243 pr_warn("md: invalid raid superblock magic on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 goto abort;
1245 }
1246
1247 if (sb->major_version != 0 ||
NeilBrownf6705572006-03-27 01:18:11 -08001248 sb->minor_version < 90 ||
1249 sb->minor_version > 91) {
NeilBrown9d487392016-11-02 14:16:49 +11001250 pr_warn("Bad version number %d.%d on %s\n",
1251 sb->major_version, sb->minor_version, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 goto abort;
1253 }
1254
1255 if (sb->raid_disks <= 0)
1256 goto abort;
1257
NeilBrown4d167f02007-05-09 02:35:37 -07001258 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
NeilBrown9d487392016-11-02 14:16:49 +11001259 pr_warn("md: invalid superblock checksum on %s\n", b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 goto abort;
1261 }
1262
1263 rdev->preferred_minor = sb->md_minor;
1264 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10001265 rdev->new_data_offset = 0;
NeilBrown0002b272005-09-09 16:23:53 -07001266 rdev->sb_size = MD_SB_BYTES;
NeilBrown9f2f3832011-07-28 11:31:47 +10001267 rdev->badblocks.shift = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001268
1269 if (sb->level == LEVEL_MULTIPATH)
1270 rdev->desc_nr = -1;
1271 else
1272 rdev->desc_nr = sb->this_disk.number;
1273
Yufen Yu228fc7d2019-10-30 18:47:02 +08001274 /* not spare disk, or LEVEL_MULTIPATH */
1275 if (sb->level == LEVEL_MULTIPATH ||
1276 (rdev->desc_nr >= 0 &&
Yufen Yu3b7436c2019-12-10 15:01:29 +08001277 rdev->desc_nr < MD_SB_DISKS &&
Yufen Yu228fc7d2019-10-30 18:47:02 +08001278 sb->disks[rdev->desc_nr].state &
1279 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1280 spare_disk = false;
1281
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001282 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001283 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001284 ret = 1;
1285 else
1286 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001287 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001289 mdp_super_t *refsb = page_address(refdev->sb_page);
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001290 if (!md_uuid_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001291 pr_warn("md: %s has different UUID to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 b, bdevname(refdev->bdev,b2));
1293 goto abort;
1294 }
Amir Goldsteine6fd2092017-05-04 16:26:20 +03001295 if (!md_sb_equal(refsb, sb)) {
NeilBrown9d487392016-11-02 14:16:49 +11001296 pr_warn("md: %s has same UUID but different superblock to %s\n",
1297 b, bdevname(refdev->bdev, b2));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 goto abort;
1299 }
1300 ev1 = md_event(sb);
1301 ev2 = md_event(refsb);
Yufen Yu6a5cb532019-10-16 16:00:03 +08001302
Yufen Yu228fc7d2019-10-30 18:47:02 +08001303 if (!spare_disk && ev1 > ev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 ret = 1;
NeilBrownf72ffdd2014-09-30 14:23:59 +10001305 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 ret = 0;
1307 }
NeilBrown8190e752009-06-18 08:48:58 +10001308 rdev->sectors = rdev->sb_start;
NeilBrown667a5312012-08-16 16:46:12 +10001309 /* Limit to 4TB as metadata cannot record more than that.
1310 * (not needed for Linear and RAID0 as metadata doesn't
1311 * record this size)
1312 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001313 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001314 rdev->sectors = (sector_t)(2ULL << 32) - 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
NeilBrown27a7b262011-09-10 17:21:28 +10001316 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
NeilBrown2bf071b2006-01-06 00:20:55 -08001317 /* "this cannot possibly happen" ... */
1318 ret = -EINVAL;
1319
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 abort:
1321 return ret;
1322}
1323
1324/*
1325 * validate_super for 0.90.0
1326 */
NeilBrownfd01b882011-10-11 16:47:53 +11001327static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328{
1329 mdp_disk_t *desc;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001330 mdp_super_t *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001331 __u64 ev1 = md_event(sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332
NeilBrown41158c72005-06-21 17:17:25 -07001333 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001334 clear_bit(Faulty, &rdev->flags);
1335 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001336 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001337 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001338
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 if (mddev->raid_disks == 0) {
1340 mddev->major_version = 0;
1341 mddev->minor_version = sb->minor_version;
1342 mddev->patch_version = sb->patch_version;
NeilBrowne6910632008-02-06 01:39:51 -08001343 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001344 mddev->chunk_sectors = sb->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 mddev->ctime = sb->ctime;
1346 mddev->utime = sb->utime;
1347 mddev->level = sb->level;
NeilBrownd9d166c2006-01-06 00:20:51 -08001348 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 mddev->layout = sb->layout;
1350 mddev->raid_disks = sb->raid_disks;
NeilBrown27a7b262011-09-10 17:21:28 +10001351 mddev->dev_sectors = ((sector_t)sb->size) * 2;
NeilBrown07d84d102006-06-26 00:27:56 -07001352 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001353 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001354 mddev->bitmap_info.space = 0;
1355		/* bitmap can use 60K after the 4K superblock */
NeilBrownc3d97142009-12-14 12:49:52 +11001356 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001357 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrown2c810cd2012-05-21 09:27:00 +10001358 mddev->reshape_backwards = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
NeilBrownf6705572006-03-27 01:18:11 -08001360 if (mddev->minor_version >= 91) {
1361 mddev->reshape_position = sb->reshape_position;
1362 mddev->delta_disks = sb->delta_disks;
1363 mddev->new_level = sb->new_level;
1364 mddev->new_layout = sb->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001365 mddev->new_chunk_sectors = sb->new_chunk >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001366 if (mddev->delta_disks < 0)
1367 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001368 } else {
1369 mddev->reshape_position = MaxSector;
1370 mddev->delta_disks = 0;
1371 mddev->new_level = mddev->level;
1372 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001373 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001374 }
NeilBrown33f2c352019-09-09 16:52:29 +10001375 if (mddev->level == 0)
1376 mddev->layout = -1;
NeilBrownf6705572006-03-27 01:18:11 -08001377
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 if (sb->state & (1<<MD_SB_CLEAN))
1379 mddev->recovery_cp = MaxSector;
1380 else {
NeilBrownf72ffdd2014-09-30 14:23:59 +10001381 if (sb->events_hi == sb->cp_events_hi &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 sb->events_lo == sb->cp_events_lo) {
1383 mddev->recovery_cp = sb->recovery_cp;
1384 } else
1385 mddev->recovery_cp = 0;
1386 }
1387
1388 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1389 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1390 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1391 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1392
1393 mddev->max_disks = MD_SB_DISKS;
NeilBrowna654b9d82005-06-21 17:17:27 -07001394
1395 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001396 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001397 mddev->bitmap_info.offset =
1398 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10001399 mddev->bitmap_info.space =
Dave Jonesc9ad0202013-08-19 22:26:32 -04001400 mddev->bitmap_info.default_space;
NeilBrown6409bb02012-05-22 13:55:07 +10001401 }
NeilBrowna654b9d82005-06-21 17:17:27 -07001402
NeilBrown41158c72005-06-21 17:17:25 -07001403 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001404		/* Insist on a good event counter while assembling, except
1405 * for spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001407 if (sb->disks[rdev->desc_nr].state & (
1408 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
NeilBrownf72ffdd2014-09-30 14:23:59 +10001409 if (ev1 < mddev->events)
NeilBrownbe6800a2010-05-18 10:17:09 +10001410 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001411 } else if (mddev->bitmap) {
1412 /* if adding to array with a bitmap, then we can accept an
1413 * older device ... but not too old.
1414 */
NeilBrown41158c72005-06-21 17:17:25 -07001415 if (ev1 < mddev->bitmap->events_cleared)
1416 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001417 if (ev1 < mddev->events)
1418 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001419 } else {
1420 if (ev1 < mddev->events)
1421 /* just a hot-add of a new device, leave raid_disk at -1 */
1422 return 0;
1423 }
NeilBrown41158c72005-06-21 17:17:25 -07001424
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 if (mddev->level != LEVEL_MULTIPATH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 desc = sb->disks + rdev->desc_nr;
1427
1428 if (desc->state & (1<<MD_DISK_FAULTY))
NeilBrownb2d444d2005-11-08 21:39:31 -08001429 set_bit(Faulty, &rdev->flags);
NeilBrown7c7546c2006-06-26 00:27:41 -07001430 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1431 desc->raid_disk < mddev->raid_disks */) {
NeilBrownb2d444d2005-11-08 21:39:31 -08001432 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 rdev->raid_disk = desc->raid_disk;
NeilBrownf4667222013-12-09 12:04:56 +11001434 rdev->saved_raid_disk = desc->raid_disk;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001435 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1436 /* active but not in sync implies recovery up to
1437 * reshape position. We don't know exactly where
1438 * that is, so set to zero for now */
1439 if (mddev->minor_version >= 91) {
1440 rdev->recovery_offset = 0;
1441 rdev->raid_disk = desc->raid_disk;
1442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001444 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1445 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11001446 if (desc->state & (1<<MD_DISK_FAILFAST))
1447 set_bit(FailFast, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07001448 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08001449 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 return 0;
1451}
1452
1453/*
1454 * sync_super for 0.90.0
1455 */
NeilBrownfd01b882011-10-11 16:47:53 +11001456static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457{
1458 mdp_super_t *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11001459 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 int next_spare = mddev->raid_disks;
NeilBrown19133a42005-11-08 21:39:35 -08001461
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462	/* make rdev->sb match mddev data:
1463 *
1464 * 1/ zero out disks
1465 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1466 * 3/ any empty disks < next_spare become removed
1467 *
1468 * disks[0] gets initialised to REMOVED because
1469 * we cannot be sure from other fields if it has
1470 * been initialised or not.
1471 */
1472 int i;
1473	int active = 0, working = 0, failed = 0, spare = 0, nr_disks = 0;
1474
NeilBrown61181562005-09-09 16:24:02 -07001475 rdev->sb_size = MD_SB_BYTES;
1476
Namhyung Kim65a06f062011-07-27 11:00:36 +10001477 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
1479 memset(sb, 0, sizeof(*sb));
1480
1481 sb->md_magic = MD_SB_MAGIC;
1482 sb->major_version = mddev->major_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483 sb->patch_version = mddev->patch_version;
1484 sb->gvalid_words = 0; /* ignored */
1485 memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1486 memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1487 memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1488 memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1489
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001490 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 sb->level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11001492 sb->size = mddev->dev_sectors / 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 sb->raid_disks = mddev->raid_disks;
1494 sb->md_minor = mddev->md_minor;
NeilBrowne6910632008-02-06 01:39:51 -08001495 sb->not_persistent = 0;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001496 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 sb->state = 0;
1498 sb->events_hi = (mddev->events>>32);
1499 sb->events_lo = (u32)mddev->events;
1500
NeilBrownf6705572006-03-27 01:18:11 -08001501 if (mddev->reshape_position == MaxSector)
1502 sb->minor_version = 90;
1503 else {
1504 sb->minor_version = 91;
1505 sb->reshape_position = mddev->reshape_position;
1506 sb->new_level = mddev->new_level;
1507 sb->delta_disks = mddev->delta_disks;
1508 sb->new_layout = mddev->new_layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001509 sb->new_chunk = mddev->new_chunk_sectors << 9;
NeilBrownf6705572006-03-27 01:18:11 -08001510 }
1511 mddev->minor_version = sb->minor_version;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 if (mddev->in_sync)
1513 {
1514 sb->recovery_cp = mddev->recovery_cp;
1515 sb->cp_events_hi = (mddev->events>>32);
1516 sb->cp_events_lo = (u32)mddev->events;
1517 if (mddev->recovery_cp == MaxSector)
1518 sb->state = (1<< MD_SB_CLEAN);
1519 } else
1520 sb->recovery_cp = 0;
1521
1522 sb->layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10001523 sb->chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
NeilBrownc3d97142009-12-14 12:49:52 +11001525 if (mddev->bitmap && mddev->bitmap_info.file == NULL)
NeilBrowna654b9d82005-06-21 17:17:27 -07001526 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 sb->disks[0].state = (1<<MD_DISK_REMOVED);
NeilBrowndafb20f2012-03-19 12:46:39 +11001529 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 mdp_disk_t *d;
NeilBrown86e6ffd2005-11-08 21:39:24 -08001531 int desc_nr;
NeilBrown0261cd9f2009-11-13 17:40:48 +11001532 int is_active = test_bit(In_sync, &rdev2->flags);
1533
1534 if (rdev2->raid_disk >= 0 &&
1535 sb->minor_version >= 91)
1536 /* we have nowhere to store the recovery_offset,
1537 * but if it is not below the reshape_position,
1538 * we can piggy-back on that.
1539 */
1540 is_active = 1;
1541 if (rdev2->raid_disk < 0 ||
1542 test_bit(Faulty, &rdev2->flags))
1543 is_active = 0;
1544 if (is_active)
NeilBrown86e6ffd2005-11-08 21:39:24 -08001545 desc_nr = rdev2->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 else
NeilBrown86e6ffd2005-11-08 21:39:24 -08001547 desc_nr = next_spare++;
NeilBrown19133a42005-11-08 21:39:35 -08001548 rdev2->desc_nr = desc_nr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 d = &sb->disks[rdev2->desc_nr];
1550 nr_disks++;
1551 d->number = rdev2->desc_nr;
1552 d->major = MAJOR(rdev2->bdev->bd_dev);
1553 d->minor = MINOR(rdev2->bdev->bd_dev);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001554 if (is_active)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 d->raid_disk = rdev2->raid_disk;
1556 else
1557 d->raid_disk = rdev2->desc_nr; /* compatibility */
NeilBrown1be78922006-03-27 01:18:03 -08001558 if (test_bit(Faulty, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 d->state = (1<<MD_DISK_FAULTY);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001560 else if (is_active) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 d->state = (1<<MD_DISK_ACTIVE);
NeilBrown0261cd9f2009-11-13 17:40:48 +11001562 if (test_bit(In_sync, &rdev2->flags))
1563 d->state |= (1<<MD_DISK_SYNC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 active++;
1565 working++;
1566 } else {
1567 d->state = 0;
1568 spare++;
1569 working++;
1570 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07001571 if (test_bit(WriteMostly, &rdev2->flags))
1572 d->state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11001573 if (test_bit(FailFast, &rdev2->flags))
1574 d->state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 /* now set the "removed" and "faulty" bits on any missing devices */
1577 for (i=0 ; i < mddev->raid_disks ; i++) {
1578 mdp_disk_t *d = &sb->disks[i];
1579 if (d->state == 0 && d->number == 0) {
1580 d->number = i;
1581 d->raid_disk = i;
1582 d->state = (1<<MD_DISK_REMOVED);
1583 d->state |= (1<<MD_DISK_FAULTY);
1584 failed++;
1585 }
1586 }
1587 sb->nr_disks = nr_disks;
1588 sb->active_disks = active;
1589 sb->working_disks = working;
1590 sb->failed_disks = failed;
1591 sb->spare_disks = spare;
1592
1593 sb->this_disk = sb->disks[rdev->desc_nr];
1594 sb->sb_csum = calc_sb_csum(sb);
1595}
1596
1597/*
Chris Webb0cd17fe2008-06-28 08:31:46 +10001598 * rdev_size_change for 0.90.0
1599 */
1600static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11001601super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001602{
Andre Noll58c0fed2009-03-31 14:33:13 +11001603 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001604 return 0; /* component must fit device */
NeilBrownc3d97142009-12-14 12:49:52 +11001605 if (rdev->mddev->bitmap_info.offset)
Chris Webb0cd17fe2008-06-28 08:31:46 +10001606 return 0; /* can't move bitmap */
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11001607 rdev->sb_start = calc_dev_sboffset(rdev);
Andre Noll15f4a5f2008-07-21 14:42:12 +10001608 if (!num_sectors || num_sectors > rdev->sb_start)
1609 num_sectors = rdev->sb_start;
NeilBrown27a7b262011-09-10 17:21:28 +10001610 /* Limit to 4TB as metadata cannot record more than that.
1611 * 4TB == 2^32 KB, or 2*2^32 sectors.
1612 */
Christoph Hellwig72deb452019-04-05 18:08:59 +02001613 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
Arnd Bergmann3312c952015-12-21 10:51:01 +11001614 num_sectors = (sector_t)(2ULL << 32) - 2;
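	/*
	 * Informal note on the loop below: md_super_wait() returning a
	 * negative value is taken to mean the superblock write failed
	 * in a retryable way (e.g. a failfast write that was flagged
	 * for rewrite), so the write is simply reissued until it
	 * completes cleanly.
	 */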
NeilBrown46533ff2016-11-18 16:16:11 +11001615 do {
1616 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
Chris Webb0cd17fe2008-06-28 08:31:46 +10001617 rdev->sb_page);
NeilBrown46533ff2016-11-18 16:16:11 +11001618 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11001619 return num_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10001620}
1621
NeilBrownc6563a82012-05-21 09:27:00 +10001622static int
1623super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1624{
1625 /* non-zero offset changes not possible with v0.90 */
1626 return new_offset == 0;
1627}
Chris Webb0cd17fe2008-06-28 08:31:46 +10001628
1629/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 * version 1 superblock
1631 */
1632
NeilBrownf72ffdd2014-09-30 14:23:59 +10001633static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634{
NeilBrown1c05b4b2006-10-21 10:24:08 -07001635 __le32 disk_csum;
1636 u32 csum;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 unsigned long long newcsum;
1638 int size = 256 + le32_to_cpu(sb->max_dev)*2;
NeilBrown1c05b4b2006-10-21 10:24:08 -07001639 __le32 *isuper = (__le32*)sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
1641 disk_csum = sb->sb_csum;
1642 sb->sb_csum = 0;
1643 newcsum = 0;
NeilBrown1f3c9902012-12-11 13:09:00 +11001644 for (; size >= 4; size -= 4)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645 newcsum += le32_to_cpu(*isuper++);
1646
1647 if (size == 2)
NeilBrown1c05b4b2006-10-21 10:24:08 -07001648 newcsum += le16_to_cpu(*(__le16*) isuper);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649
1650 csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1651 sb->sb_csum = disk_csum;
1652 return cpu_to_le32(csum);
1653}
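/*
 * Illustrative note (not part of the driver): the v1 checksum covers
 * the 256-byte fixed superblock plus two bytes per dev_roles[] entry,
 * hence size = 256 + max_dev*2 above.  With max_dev = 384, say,
 * 256 + 768 = 1024 bytes are summed as little-endian 32-bit words and
 * the 64-bit total is folded once into 32 bits.  super_1_load() below
 * reads a full 4K and rejects max_dev > (4096-256)/2, so the sum can
 * never run past the page.
 */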
1654
NeilBrown3cb03002011-10-11 16:45:26 +11001655static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
1657 struct mdp_superblock_1 *sb;
1658 int ret;
Andre Noll0f420352008-07-11 22:02:23 +10001659 sector_t sb_start;
NeilBrownc6563a82012-05-21 09:27:00 +10001660 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown0002b272005-09-09 16:23:53 -07001662 int bmask;
Yufen Yu228fc7d2019-10-30 18:47:02 +08001663 bool spare_disk = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664
1665 /*
Andre Noll0f420352008-07-11 22:02:23 +10001666	 * Calculate the position of the superblock in 512-byte sectors.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667	 * It is always aligned to a 4K boundary and,
1668	 * depending on minor_version, it can be:
1669 * 0: At least 8K, but less than 12K, from end of device
1670 * 1: At start of device
1671 * 2: 4K from start of device.
1672 */
1673 switch(minor_version) {
1674 case 0:
Mike Snitzer77304d22010-11-08 14:39:12 +01001675 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll0f420352008-07-11 22:02:23 +10001676 sb_start -= 8*2;
1677 sb_start &= ~(sector_t)(4*2-1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 break;
1679 case 1:
Andre Noll0f420352008-07-11 22:02:23 +10001680 sb_start = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 break;
1682 case 2:
Andre Noll0f420352008-07-11 22:02:23 +10001683 sb_start = 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 break;
1685 default:
1686 return -EINVAL;
1687 }
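	/*
	 * Worked example for minor_version 0, using the math above: on
	 * a 1000003-sector device, 1000003 - 16 = 999987, and rounding
	 * down to an 8-sector boundary gives sb_start = 999984, so the
	 * 4K-aligned superblock sits 19 sectors (9.5K) from the end -
	 * within the documented 8K-12K window.
	 */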
Andre Noll0f420352008-07-11 22:02:23 +10001688 rdev->sb_start = sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
NeilBrown0002b272005-09-09 16:23:53 -07001690	/* superblock is rarely larger than 1K, but it can be,
1691	 * and it is safe to read 4K, so we do that
1692 */
1693 ret = read_disk_sb(rdev, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 if (ret) return ret;
1695
Namhyung Kim65a06f062011-07-27 11:00:36 +10001696 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
1698 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1699 sb->major_version != cpu_to_le32(1) ||
1700 le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
Andre Noll0f420352008-07-11 22:02:23 +10001701 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
NeilBrown71c08052005-09-09 16:23:51 -07001702 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 return -EINVAL;
1704
1705 if (calc_sb_1_csum(sb) != sb->sb_csum) {
NeilBrown9d487392016-11-02 14:16:49 +11001706 pr_warn("md: invalid superblock checksum on %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 bdevname(rdev->bdev,b));
1708 return -EINVAL;
1709 }
1710 if (le64_to_cpu(sb->data_size) < 10) {
NeilBrown9d487392016-11-02 14:16:49 +11001711 pr_warn("md: data_size too small on %s\n",
1712 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 return -EINVAL;
1714 }
NeilBrownc6563a82012-05-21 09:27:00 +10001715 if (sb->pad0 ||
1716 sb->pad3[0] ||
1717 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1718 /* Some padding is non-zero, might be a new feature */
1719 return -EINVAL;
NeilBrowne11e93f2007-05-09 02:35:36 -07001720
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 rdev->preferred_minor = 0xffff;
1722 rdev->data_offset = le64_to_cpu(sb->data_offset);
NeilBrownc6563a82012-05-21 09:27:00 +10001723 rdev->new_data_offset = rdev->data_offset;
1724 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1725 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1726 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
NeilBrown4dbcdc72006-01-06 00:20:52 -08001727 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728
NeilBrown0002b272005-09-09 16:23:53 -07001729 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
Martin K. Petersene1defc42009-05-22 17:17:49 -04001730 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
NeilBrown0002b272005-09-09 16:23:53 -07001731 if (rdev->sb_size & bmask)
NeilBrowna1801f82008-03-04 14:29:31 -08001732 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1733
1734 if (minor_version
Andre Noll0f420352008-07-11 22:02:23 +10001735 && rdev->data_offset < sb_start + (rdev->sb_size/512))
NeilBrowna1801f82008-03-04 14:29:31 -08001736 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10001737 if (minor_version
1738 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1739 return -EINVAL;
NeilBrown0002b272005-09-09 16:23:53 -07001740
NeilBrown31b65a02006-07-10 04:44:14 -07001741 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1742 rdev->desc_nr = -1;
1743 else
1744 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1745
NeilBrown2699b672011-07-28 11:31:47 +10001746 if (!rdev->bb_page) {
1747 rdev->bb_page = alloc_page(GFP_KERNEL);
1748 if (!rdev->bb_page)
1749 return -ENOMEM;
1750 }
1751 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1752 rdev->badblocks.count == 0) {
1753 /* need to load the bad block list.
1754 * Currently we limit it to one page.
1755 */
1756 s32 offset;
1757 sector_t bb_sector;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001758 __le64 *bbp;
NeilBrown2699b672011-07-28 11:31:47 +10001759 int i;
1760 int sectors = le16_to_cpu(sb->bblog_size);
1761 if (sectors > (PAGE_SIZE / 512))
1762 return -EINVAL;
1763 offset = le32_to_cpu(sb->bblog_offset);
1764 if (offset == 0)
1765 return -EINVAL;
1766 bb_sector = (long long)offset;
1767 if (!sync_page_io(rdev, bb_sector, sectors << 9,
Mike Christie796a5cf2016-06-05 14:32:07 -05001768 rdev->bb_page, REQ_OP_READ, 0, true))
NeilBrown2699b672011-07-28 11:31:47 +10001769 return -EIO;
Christoph Hellwig00485d02019-04-04 18:56:12 +02001770 bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10001771 rdev->badblocks.shift = sb->bblog_shift;
1772 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1773 u64 bb = le64_to_cpu(*bbp);
1774 int count = bb & (0x3ff);
1775 u64 sector = bb >> 10;
1776 sector <<= sb->bblog_shift;
1777 count <<= sb->bblog_shift;
1778 if (bb + 1 == 0)
1779 break;
Vishal Vermafc974ee2015-12-24 19:20:34 -07001780 if (badblocks_set(&rdev->badblocks, sector, count, 1))
NeilBrown2699b672011-07-28 11:31:47 +10001781 return -EINVAL;
1782 }
NeilBrown486adf72013-04-24 11:42:44 +10001783 } else if (sb->bblog_offset != 0)
1784 rdev->badblocks.shift = 0;
NeilBrown2699b672011-07-28 11:31:47 +10001785
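	/*
	 * Sketch of the on-disk bad-block encoding decoded above (an
	 * informal restatement of the loop, not new behaviour): each
	 * 64-bit entry packs a start sector in the top 54 bits and a
	 * length in the low 10 bits, both in units of
	 * (1 << bblog_shift) sectors:
	 *
	 *	u64 entry = (start << 10) | length;	/* length <= 0x3ff */
	 *
	 * and an all-ones entry (bb + 1 == 0) terminates the list.
	 * super_1_sync() builds entries the same way from BB_OFFSET()
	 * and BB_LEN().
	 */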
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001786 if ((le32_to_cpu(sb->feature_map) &
1787 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001788 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1789 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1790 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1791 }
1792
NeilBrown33f2c352019-09-09 16:52:29 +10001793 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1794 sb->level != 0)
1795 return -EINVAL;
1796
Yufen Yu228fc7d2019-10-30 18:47:02 +08001797 /* not spare disk, or LEVEL_MULTIPATH */
1798 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1799 (rdev->desc_nr >= 0 &&
1800 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1801 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1802 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1803 spare_disk = false;
Yufen Yu6a5cb532019-10-16 16:00:03 +08001804
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001805 if (!refdev) {
Yufen Yu228fc7d2019-10-30 18:47:02 +08001806 if (!spare_disk)
Yufen Yu6a5cb532019-10-16 16:00:03 +08001807 ret = 1;
1808 else
1809 ret = 0;
Harvey Harrison9a7b2b02008-04-28 02:15:49 -07001810 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811 __u64 ev1, ev2;
Namhyung Kim65a06f062011-07-27 11:00:36 +10001812 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
1814 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1815 sb->level != refsb->level ||
1816 sb->layout != refsb->layout ||
1817 sb->chunksize != refsb->chunksize) {
NeilBrown9d487392016-11-02 14:16:49 +11001818 pr_warn("md: %s has strangely different superblock to %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 bdevname(rdev->bdev,b),
1820 bdevname(refdev->bdev,b2));
1821 return -EINVAL;
1822 }
1823 ev1 = le64_to_cpu(sb->events);
1824 ev2 = le64_to_cpu(refsb->events);
1825
Yufen Yu228fc7d2019-10-30 18:47:02 +08001826 if (!spare_disk && ev1 > ev2)
NeilBrown8ed75462006-02-03 03:03:41 -08001827 ret = 1;
1828 else
1829 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 }
NeilBrownc6563a82012-05-21 09:27:00 +10001831 if (minor_version) {
1832 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1833 sectors -= rdev->data_offset;
1834 } else
1835 sectors = rdev->sb_start;
1836 if (sectors < le64_to_cpu(sb->data_size))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 return -EINVAL;
Andre Nolldd8ac332009-03-31 14:33:13 +11001838 rdev->sectors = le64_to_cpu(sb->data_size);
NeilBrown8ed75462006-02-03 03:03:41 -08001839 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840}
1841
NeilBrownfd01b882011-10-11 16:47:53 +11001842static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843{
Namhyung Kim65a06f062011-07-27 11:00:36 +10001844 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
NeilBrown07d84d102006-06-26 00:27:56 -07001845 __u64 ev1 = le64_to_cpu(sb->events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846
NeilBrown41158c72005-06-21 17:17:25 -07001847 rdev->raid_disk = -1;
NeilBrownc5d79ad2008-02-06 01:39:54 -08001848 clear_bit(Faulty, &rdev->flags);
1849 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11001850 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001851 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc5d79ad2008-02-06 01:39:54 -08001852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 if (mddev->raid_disks == 0) {
1854 mddev->major_version = 1;
1855 mddev->patch_version = 0;
NeilBrowne6910632008-02-06 01:39:51 -08001856 mddev->external = 0;
Andre Noll9d8f0362009-06-18 08:45:01 +10001857 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11001858 mddev->ctime = le64_to_cpu(sb->ctime);
1859 mddev->utime = le64_to_cpu(sb->utime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860 mddev->level = le32_to_cpu(sb->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08001861 mddev->clevel[0] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 mddev->layout = le32_to_cpu(sb->layout);
1863 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11001864 mddev->dev_sectors = le64_to_cpu(sb->size);
NeilBrown07d84d102006-06-26 00:27:56 -07001865 mddev->events = ev1;
NeilBrownc3d97142009-12-14 12:49:52 +11001866 mddev->bitmap_info.offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10001867 mddev->bitmap_info.space = 0;
1868		/* Default location for the bitmap is 1K after the superblock,
1869		 * using 3K, for a total of 4K
1870 */
NeilBrownc3d97142009-12-14 12:49:52 +11001871 mddev->bitmap_info.default_offset = 1024 >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10001872 mddev->bitmap_info.default_space = (4096-1024) >> 9;
NeilBrown2c810cd2012-05-21 09:27:00 +10001873 mddev->reshape_backwards = 0;
1874
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1876 memcpy(mddev->uuid, sb->set_uuid, 16);
1877
1878 mddev->max_disks = (4096-256)/2;
NeilBrowna654b9d82005-06-21 17:17:27 -07001879
NeilBrown71c08052005-09-09 16:23:51 -07001880 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
NeilBrown6409bb02012-05-22 13:55:07 +10001881 mddev->bitmap_info.file == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001882 mddev->bitmap_info.offset =
1883 (__s32)le32_to_cpu(sb->bitmap_offset);
NeilBrown6409bb02012-05-22 13:55:07 +10001884 /* Metadata doesn't record how much space is available.
1885			 * For 1.0, we assume the bitmap can use the space up to the
1886			 * superblock if it sits in front of it, else up to 4K beyond it.
1887 * For others, assume no change is possible.
1888 */
1889 if (mddev->minor_version > 0)
1890 mddev->bitmap_info.space = 0;
1891 else if (mddev->bitmap_info.offset > 0)
1892 mddev->bitmap_info.space =
1893 8 - mddev->bitmap_info.offset;
1894 else
1895 mddev->bitmap_info.space =
1896 -mddev->bitmap_info.offset;
1897 }
NeilBrowne11e93f2007-05-09 02:35:36 -07001898
NeilBrownf6705572006-03-27 01:18:11 -08001899 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1900 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1901 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1902 mddev->new_level = le32_to_cpu(sb->new_level);
1903 mddev->new_layout = le32_to_cpu(sb->new_layout);
Andre Noll664e7c42009-06-18 08:45:27 +10001904 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
NeilBrown2c810cd2012-05-21 09:27:00 +10001905 if (mddev->delta_disks < 0 ||
1906 (mddev->delta_disks == 0 &&
1907 (le32_to_cpu(sb->feature_map)
1908 & MD_FEATURE_RESHAPE_BACKWARDS)))
1909 mddev->reshape_backwards = 1;
NeilBrownf6705572006-03-27 01:18:11 -08001910 } else {
1911 mddev->reshape_position = MaxSector;
1912 mddev->delta_disks = 0;
1913 mddev->new_level = mddev->level;
1914 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10001915 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08001916 }
1917
NeilBrown33f2c352019-09-09 16:52:29 +10001918 if (mddev->level == 0 &&
1919 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1920 mddev->layout = -1;
1921
Song Liu486b0f72016-08-19 15:34:01 -07001922 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
Shaohua Lia62ab492016-01-06 14:37:13 -08001923 set_bit(MD_HAS_JOURNAL, &mddev->flags);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001924
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001925 if (le32_to_cpu(sb->feature_map) &
1926 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001927 if (le32_to_cpu(sb->feature_map) &
1928 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1929 return -EINVAL;
Pawel Baldysiakddc08822017-08-16 17:13:45 +02001930 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1931 (le32_to_cpu(sb->feature_map) &
1932 MD_FEATURE_MULTIPLE_PPLS))
1933 return -EINVAL;
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01001934 set_bit(MD_HAS_PPL, &mddev->flags);
1935 }
NeilBrown41158c72005-06-21 17:17:25 -07001936 } else if (mddev->pers == NULL) {
NeilBrownbe6800a2010-05-18 10:17:09 +10001937		/* Insist on a good event counter while assembling, except for
1938 * spares (which don't need an event count) */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 ++ev1;
NeilBrownbe6800a2010-05-18 10:17:09 +10001940 if (rdev->desc_nr >= 0 &&
1941 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
Song Liua3dfbda2015-10-08 21:54:11 -07001942 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1943 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
NeilBrownbe6800a2010-05-18 10:17:09 +10001944 if (ev1 < mddev->events)
1945 return -EINVAL;
NeilBrown41158c72005-06-21 17:17:25 -07001946 } else if (mddev->bitmap) {
1947 /* If adding to array with a bitmap, then we can accept an
1948 * older device, but not too old.
1949 */
NeilBrown41158c72005-06-21 17:17:25 -07001950 if (ev1 < mddev->bitmap->events_cleared)
1951 return 0;
NeilBrown8313b8e2013-12-12 10:13:33 +11001952 if (ev1 < mddev->events)
1953 set_bit(Bitmap_sync, &rdev->flags);
NeilBrown07d84d102006-06-26 00:27:56 -07001954 } else {
1955 if (ev1 < mddev->events)
1956 /* just a hot-add of a new device, leave raid_disk at -1 */
1957 return 0;
1958 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 if (mddev->level != LEVEL_MULTIPATH) {
1960 int role;
NeilBrown3673f302009-08-03 10:59:56 +10001961 if (rdev->desc_nr < 0 ||
1962 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
Song Liuc4d4c912015-08-13 14:31:54 -07001963 role = MD_DISK_ROLE_SPARE;
NeilBrown3673f302009-08-03 10:59:56 +10001964 rdev->desc_nr = -1;
1965 } else
1966 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 switch(role) {
Song Liuc4d4c912015-08-13 14:31:54 -07001968 case MD_DISK_ROLE_SPARE: /* spare */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 break;
Song Liuc4d4c912015-08-13 14:31:54 -07001970 case MD_DISK_ROLE_FAULTY: /* faulty */
NeilBrownb2d444d2005-11-08 21:39:31 -08001971 set_bit(Faulty, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 break;
Song Liubac624f2015-08-13 14:31:55 -07001973 case MD_DISK_ROLE_JOURNAL: /* journal device */
1974 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1975 /* journal device without journal feature */
NeilBrown9d487392016-11-02 14:16:49 +11001976 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
Song Liubac624f2015-08-13 14:31:55 -07001977 return -EINVAL;
1978 }
1979 set_bit(Journal, &rdev->flags);
Shaohua Li3069aa82015-08-13 14:31:56 -07001980 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
Shaohua Li9b156032015-12-18 15:19:16 +11001981 rdev->raid_disk = 0;
Song Liubac624f2015-08-13 14:31:55 -07001982 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 default:
NeilBrownf4667222013-12-09 12:04:56 +11001984 rdev->saved_raid_disk = role;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001985 if ((le32_to_cpu(sb->feature_map) &
NeilBrownf4667222013-12-09 12:04:56 +11001986 MD_FEATURE_RECOVERY_OFFSET)) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07001987 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11001988 if (!(le32_to_cpu(sb->feature_map) &
1989 MD_FEATURE_RECOVERY_BITMAP))
1990 rdev->saved_raid_disk = -1;
Guoqing Jiang062f5b2a2019-07-24 11:09:20 +02001991 } else {
1992 /*
1993 * If the array is FROZEN, then the device can't
1994 * be in_sync with rest of array.
1995 */
1996 if (!test_bit(MD_RECOVERY_FROZEN,
1997 &mddev->recovery))
1998 set_bit(In_sync, &rdev->flags);
1999 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 rdev->raid_disk = role;
2001 break;
2002 }
NeilBrown8ddf9ef2005-09-09 16:23:45 -07002003 if (sb->devflags & WriteMostly1)
2004 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11002005 if (sb->devflags & FailFast1)
2006 set_bit(FailFast, &rdev->flags);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002007 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
2008 set_bit(Replacement, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07002009 } else /* MULTIPATH are always insync */
NeilBrownb2d444d2005-11-08 21:39:31 -08002010 set_bit(In_sync, &rdev->flags);
NeilBrown41158c72005-06-21 17:17:25 -07002011
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 return 0;
2013}
2014
NeilBrownfd01b882011-10-11 16:47:53 +11002015static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016{
2017 struct mdp_superblock_1 *sb;
NeilBrown3cb03002011-10-11 16:45:26 +11002018 struct md_rdev *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 int max_dev, i;
2020 /* make rdev->sb match mddev and rdev data. */
2021
Namhyung Kim65a06f062011-07-27 11:00:36 +10002022 sb = page_address(rdev->sb_page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
2024 sb->feature_map = 0;
2025 sb->pad0 = 0;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002026 sb->recovery_offset = cpu_to_le64(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 memset(sb->pad3, 0, sizeof(sb->pad3));
2028
2029 sb->utime = cpu_to_le64((__u64)mddev->utime);
2030 sb->events = cpu_to_le64(mddev->events);
2031 if (mddev->in_sync)
2032 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
Shaohua Libd18f642015-09-02 13:49:50 -07002033 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
2034 sb->resync_offset = cpu_to_le64(MaxSector);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 else
2036 sb->resync_offset = cpu_to_le64(0);
2037
NeilBrown1c05b4b2006-10-21 10:24:08 -07002038 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
NeilBrown4dbcdc72006-01-06 00:20:52 -08002039
NeilBrownf0ca3402006-02-02 14:28:04 -08002040 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
Andre Noll58c0fed2009-03-31 14:33:13 +11002041 sb->size = cpu_to_le64(mddev->dev_sectors);
Andre Noll9d8f0362009-06-18 08:45:01 +10002042 sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
NeilBrown62e1e382009-05-26 09:40:59 +10002043 sb->level = cpu_to_le32(mddev->level);
2044 sb->layout = cpu_to_le32(mddev->layout);
NeilBrown688834e2016-11-18 16:16:11 +11002045 if (test_bit(FailFast, &rdev->flags))
2046 sb->devflags |= FailFast1;
2047 else
2048 sb->devflags &= ~FailFast1;
NeilBrownf0ca3402006-02-02 14:28:04 -08002049
NeilBrownaeb9b2112011-08-25 14:43:08 +10002050 if (test_bit(WriteMostly, &rdev->flags))
2051 sb->devflags |= WriteMostly1;
2052 else
2053 sb->devflags &= ~WriteMostly1;
NeilBrownc6563a82012-05-21 09:27:00 +10002054 sb->data_offset = cpu_to_le64(rdev->data_offset);
2055 sb->data_size = cpu_to_le64(rdev->sectors);
NeilBrownaeb9b2112011-08-25 14:43:08 +10002056
NeilBrownc3d97142009-12-14 12:49:52 +11002057 if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
2058 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
NeilBrown71c08052005-09-09 16:23:51 -07002059 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
NeilBrowna654b9d82005-06-21 17:17:27 -07002060 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002061
Shaohua Lif2076e72015-10-08 21:54:12 -07002062 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
NeilBrown97e4f422009-03-31 14:33:13 +11002063 !test_bit(In_sync, &rdev->flags)) {
NeilBrown93be75f2009-12-14 12:50:06 +11002064 sb->feature_map |=
2065 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
2066 sb->recovery_offset =
2067 cpu_to_le64(rdev->recovery_offset);
NeilBrownf4667222013-12-09 12:04:56 +11002068 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2069 sb->feature_map |=
2070 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002071 }
Shaohua Li3069aa82015-08-13 14:31:56 -07002072 /* Note: recovery_offset and journal_tail share space */
2073 if (test_bit(Journal, &rdev->flags))
2074 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002075 if (test_bit(Replacement, &rdev->flags))
2076 sb->feature_map |=
2077 cpu_to_le32(MD_FEATURE_REPLACEMENT);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002078
NeilBrownf6705572006-03-27 01:18:11 -08002079 if (mddev->reshape_position != MaxSector) {
2080 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2081 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2082 sb->new_layout = cpu_to_le32(mddev->new_layout);
2083 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2084 sb->new_level = cpu_to_le32(mddev->new_level);
Andre Noll664e7c42009-06-18 08:45:27 +10002085 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
NeilBrown2c810cd2012-05-21 09:27:00 +10002086 if (mddev->delta_disks == 0 &&
2087 mddev->reshape_backwards)
2088 sb->feature_map
2089 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
NeilBrownc6563a82012-05-21 09:27:00 +10002090 if (rdev->new_data_offset != rdev->data_offset) {
2091 sb->feature_map
2092 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2093 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2094 - rdev->data_offset));
2095 }
NeilBrownf6705572006-03-27 01:18:11 -08002096 }
NeilBrowna654b9d82005-06-21 17:17:27 -07002097
Goldwyn Rodrigues3c462c82015-08-19 07:35:54 +10002098 if (mddev_is_clustered(mddev))
2099 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2100
NeilBrown2699b672011-07-28 11:31:47 +10002101 if (rdev->badblocks.count == 0)
2102		/* Nothing to do for bad blocks */ ;
2103 else if (sb->bblog_offset == 0)
2104 /* Cannot record bad blocks on this device */
2105 md_error(mddev, rdev);
2106 else {
2107 struct badblocks *bb = &rdev->badblocks;
Christoph Hellwigae506402019-04-04 18:56:13 +02002108 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
NeilBrown2699b672011-07-28 11:31:47 +10002109 u64 *p = bb->page;
2110 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
2111 if (bb->changed) {
2112 unsigned seq;
2113
2114retry:
2115 seq = read_seqbegin(&bb->lock);
2116
2117 memset(bbp, 0xff, PAGE_SIZE);
2118
2119 for (i = 0 ; i < bb->count ; i++) {
majianpeng35f9ac22012-11-08 08:56:27 +08002120 u64 internal_bb = p[i];
NeilBrown2699b672011-07-28 11:31:47 +10002121 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2122 | BB_LEN(internal_bb));
majianpeng35f9ac22012-11-08 08:56:27 +08002123 bbp[i] = cpu_to_le64(store_bb);
NeilBrown2699b672011-07-28 11:31:47 +10002124 }
NeilBrownd0962932012-03-19 12:46:41 +11002125 bb->changed = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002126 if (read_seqretry(&bb->lock, seq))
2127 goto retry;
2128
2129 bb->sector = (rdev->sb_start +
2130 (int)le32_to_cpu(sb->bblog_offset));
2131 bb->size = le16_to_cpu(sb->bblog_size);
NeilBrown2699b672011-07-28 11:31:47 +10002132 }
2133 }
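	/*
	 * Note on the retry loop above (informal): the read_seqbegin()/
	 * read_seqretry() pair gives a consistent snapshot of bb->page
	 * while another context may be updating the bad-block list; if
	 * the sequence count changed mid-copy, the whole copy into
	 * bb_page is redone.
	 */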
2134
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 max_dev = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11002136 rdev_for_each(rdev2, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 if (rdev2->desc_nr+1 > max_dev)
2138 max_dev = rdev2->desc_nr+1;
NeilBrowna778b732007-05-23 13:58:10 -07002139
NeilBrown70471da2009-08-03 10:59:57 +10002140 if (max_dev > le32_to_cpu(sb->max_dev)) {
2141 int bmask;
NeilBrowna778b732007-05-23 13:58:10 -07002142 sb->max_dev = cpu_to_le32(max_dev);
NeilBrown70471da2009-08-03 10:59:57 +10002143 rdev->sb_size = max_dev * 2 + 256;
2144 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2145 if (rdev->sb_size & bmask)
2146 rdev->sb_size = (rdev->sb_size | bmask) + 1;
NeilBrownddcf3522010-09-08 16:48:17 +10002147 } else
2148 max_dev = le32_to_cpu(sb->max_dev);
2149
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 for (i=0; i<max_dev;i++)
Lidong Zhong8df72022017-06-12 10:45:55 +08002151 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
NeilBrownf72ffdd2014-09-30 14:23:59 +10002152
Song Liua97b7892015-10-08 21:54:09 -07002153 if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2154 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002156 if (test_bit(MD_HAS_PPL, &mddev->flags)) {
Pawel Baldysiakddc08822017-08-16 17:13:45 +02002157 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2158 sb->feature_map |=
2159 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2160 else
2161 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
Artur Paszkiewiczea0213e2017-03-09 09:59:57 +01002162 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2163 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2164 }
2165
NeilBrowndafb20f2012-03-19 12:46:39 +11002166 rdev_for_each(rdev2, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 i = rdev2->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08002168 if (test_bit(Faulty, &rdev2->flags))
Song Liuc4d4c912015-08-13 14:31:54 -07002169 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08002170 else if (test_bit(In_sync, &rdev2->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Song Liua97b7892015-10-08 21:54:09 -07002172 else if (test_bit(Journal, &rdev2->flags))
Song Liubac624f2015-08-13 14:31:55 -07002173 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
NeilBrown93be75f2009-12-14 12:50:06 +11002174 else if (rdev2->raid_disk >= 0)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07002175 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 else
Song Liuc4d4c912015-08-13 14:31:54 -07002177 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 }
2179
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 sb->sb_csum = calc_sb_1_csum(sb);
2181}
2182
Xiao Nid9c0fa52020-06-30 15:55:36 +08002183static sector_t super_1_choose_bm_space(sector_t dev_size)
2184{
2185 sector_t bm_space;
2186
2187 /* if the device is bigger than 8Gig, save 64k for bitmap
2188	 * usage; if bigger than 200Gig, save 128k
2189 */
2190 if (dev_size < 64*2)
2191 bm_space = 0;
2192 else if (dev_size - 64*2 >= 200*1024*1024*2)
2193 bm_space = 128*2;
2194 else if (dev_size - 4*2 > 8*1024*1024*2)
2195 bm_space = 64*2;
2196 else
2197 bm_space = 4*2;
2198 return bm_space;
2199}
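/*
 * Worked example using the thresholds above (all values in 512-byte
 * sectors): a 100GiB component has dev_size = 100*1024*1024*2 =
 * 209715200 sectors; that is under the 200GiB threshold but over the
 * 8GiB one, so bm_space = 64*2 = 128 sectors (64K) is reserved for
 * the bitmap.
 */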
2200
Chris Webb0cd17fe2008-06-28 08:31:46 +10002201static unsigned long long
NeilBrown3cb03002011-10-11 16:45:26 +11002202super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002203{
2204 struct mdp_superblock_1 *sb;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002205 sector_t max_sectors;
Andre Noll58c0fed2009-03-31 14:33:13 +11002206 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10002207 return 0; /* component must fit device */
NeilBrownc6563a82012-05-21 09:27:00 +10002208 if (rdev->data_offset != rdev->new_data_offset)
2209 return 0; /* too confusing */
Andre Noll0f420352008-07-11 22:02:23 +10002210 if (rdev->sb_start < rdev->data_offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002211 /* minor versions 1 and 2; superblock before data */
Mike Snitzer77304d22010-11-08 14:39:12 +01002212 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
Andre Noll15f4a5f2008-07-21 14:42:12 +10002213 max_sectors -= rdev->data_offset;
2214 if (!num_sectors || num_sectors > max_sectors)
2215 num_sectors = max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11002216 } else if (rdev->mddev->bitmap_info.offset) {
Chris Webb0cd17fe2008-06-28 08:31:46 +10002217 /* minor version 0 with bitmap we can't move */
2218 return 0;
2219 } else {
2220 /* minor version 0; superblock after data */
Xiao Nid9c0fa52020-06-30 15:55:36 +08002221 sector_t sb_start, bm_space;
2222 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2223
2224 /* 8K is for superblock */
2225 sb_start = dev_size - 8*2;
Andre Noll0f420352008-07-11 22:02:23 +10002226 sb_start &= ~(sector_t)(4*2 - 1);
Xiao Nid9c0fa52020-06-30 15:55:36 +08002227
2228 bm_space = super_1_choose_bm_space(dev_size);
2229
2230		/* Space that can be used to store data must exclude the
2231		 * superblock, bitmap space and bad block space (4K)
2232 */
2233 max_sectors = sb_start - bm_space - 4*2;
2234
Andre Noll15f4a5f2008-07-21 14:42:12 +10002235 if (!num_sectors || num_sectors > max_sectors)
2236 num_sectors = max_sectors;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002237 }
Namhyung Kim65a06f062011-07-27 11:00:36 +10002238 sb = page_address(rdev->sb_page);
Andre Noll15f4a5f2008-07-21 14:42:12 +10002239 sb->data_size = cpu_to_le64(num_sectors);
Jason Yan3fb632e2017-03-10 11:27:23 +08002240 sb->super_offset = cpu_to_le64(rdev->sb_start);
Chris Webb0cd17fe2008-06-28 08:31:46 +10002241 sb->sb_csum = calc_sb_1_csum(sb);
NeilBrown46533ff2016-11-18 16:16:11 +11002242 do {
2243 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2244 rdev->sb_page);
2245 } while (md_super_wait(rdev->mddev) < 0);
Justin Maggardc26a44e2010-11-24 16:36:17 +11002246 return num_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10002247
2248}
2249
2250static int
2251super_1_allow_new_offset(struct md_rdev *rdev,
2252 unsigned long long new_offset)
2253{
2254 /* All necessary checks on new >= old have been done */
2255 struct bitmap *bitmap;
2256 if (new_offset >= rdev->data_offset)
2257 return 1;
2258
2259 /* with 1.0 metadata, there is no metadata to tread on
2260 * so we can always move back */
2261 if (rdev->mddev->minor_version == 0)
2262 return 1;
2263
2264 /* otherwise we must be sure not to step on
2265 * any metadata, so stay:
2266 * 36K beyond start of superblock
2267 * beyond end of badblocks
2268 * beyond write-intent bitmap
2269 */
2270 if (rdev->sb_start + (32+4)*2 > new_offset)
2271 return 0;
2272 bitmap = rdev->mddev->bitmap;
2273 if (bitmap && !rdev->mddev->bitmap_info.file &&
2274 rdev->sb_start + rdev->mddev->bitmap_info.offset +
NeilBrown1ec885c2012-05-22 13:55:10 +10002275 bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
NeilBrownc6563a82012-05-21 09:27:00 +10002276 return 0;
2277 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2278 return 0;
2279
2280 return 1;
Chris Webb0cd17fe2008-06-28 08:31:46 +10002281}
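/*
 * Example (sketch): for v1.1/v1.2 metadata the checks above allow a
 * smaller new_offset only if it still leaves
 *  - (32+4)*2 = 72 sectors (36K) from sb_start for the superblock and
 *    its reserved area,
 *  - the whole on-disk write-intent bitmap, and
 *  - the recorded bad-block list
 * entirely below the new data start.
 */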
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282
Adrian Bunk75c96f82005-05-05 16:16:09 -07002283static struct super_type super_types[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 [0] = {
2285 .name = "0.90.0",
2286 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002287 .load_super = super_90_load,
2288 .validate_super = super_90_validate,
2289 .sync_super = super_90_sync,
2290 .rdev_size_change = super_90_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002291 .allow_new_offset = super_90_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 },
2293 [1] = {
2294 .name = "md-1",
2295 .owner = THIS_MODULE,
Chris Webb0cd17fe2008-06-28 08:31:46 +10002296 .load_super = super_1_load,
2297 .validate_super = super_1_validate,
2298 .sync_super = super_1_sync,
2299 .rdev_size_change = super_1_rdev_size_change,
NeilBrownc6563a82012-05-21 09:27:00 +10002300 .allow_new_offset = super_1_allow_new_offset,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 },
2302};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303
NeilBrownfd01b882011-10-11 16:47:53 +11002304static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
Jonathan Brassow076f9682011-06-07 17:51:30 -05002305{
2306 if (mddev->sync_super) {
2307 mddev->sync_super(mddev, rdev);
2308 return;
2309 }
2310
2311 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2312
2313 super_types[mddev->major_version].sync_super(mddev, rdev);
2314}
2315
NeilBrownfd01b882011-10-11 16:47:53 +11002316static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317{
NeilBrown3cb03002011-10-11 16:45:26 +11002318 struct md_rdev *rdev, *rdev2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319
NeilBrown4b809912008-07-21 17:05:25 +10002320 rcu_read_lock();
Song Liu0b020e82015-09-03 23:00:35 -07002321 rdev_for_each_rcu(rdev, mddev1) {
2322 if (test_bit(Faulty, &rdev->flags) ||
2323 test_bit(Journal, &rdev->flags) ||
2324 rdev->raid_disk == -1)
2325 continue;
2326 rdev_for_each_rcu(rdev2, mddev2) {
2327 if (test_bit(Faulty, &rdev2->flags) ||
2328 test_bit(Journal, &rdev2->flags) ||
2329 rdev2->raid_disk == -1)
2330 continue;
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02002331 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
NeilBrown4b809912008-07-21 17:05:25 +10002332 rcu_read_unlock();
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002333 return 1;
NeilBrown4b809912008-07-21 17:05:25 +10002334 }
Song Liu0b020e82015-09-03 23:00:35 -07002335 }
2336 }
NeilBrown4b809912008-07-21 17:05:25 +10002337 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 return 0;
2339}
2340
2341static LIST_HEAD(pending_raid_disks);
2342
Andre Nollac5e7112009-08-03 10:59:47 +10002343/*
2344 * Try to register data integrity profile for an mddev
2345 *
2346 * This is called when an array is started and after a disk has been kicked
2347 * from the array. It only succeeds if all working and active component devices
2348 * are integrity capable with matching profiles.
2349 */
NeilBrownfd01b882011-10-11 16:47:53 +11002350int md_integrity_register(struct mddev *mddev)
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002351{
NeilBrown3cb03002011-10-11 16:45:26 +11002352 struct md_rdev *rdev, *reference = NULL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002353
Andre Nollac5e7112009-08-03 10:59:47 +10002354 if (list_empty(&mddev->disks))
2355 return 0; /* nothing to do */
Jonathan Brassow629acb62011-06-08 15:10:08 +10002356 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2357 return 0; /* shouldn't register, or already is */
NeilBrowndafb20f2012-03-19 12:46:39 +11002358 rdev_for_each(rdev, mddev) {
Andre Nollac5e7112009-08-03 10:59:47 +10002359 /* skip spares and non-functional disks */
2360 if (test_bit(Faulty, &rdev->flags))
2361 continue;
2362 if (rdev->raid_disk < 0)
2363 continue;
Andre Nollac5e7112009-08-03 10:59:47 +10002364 if (!reference) {
2365 /* Use the first rdev as the reference */
2366 reference = rdev;
2367 continue;
2368 }
2369 /* does this rdev's profile match the reference profile? */
2370 if (blk_integrity_compare(reference->bdev->bd_disk,
2371 rdev->bdev->bd_disk) < 0)
2372 return -EINVAL;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002373 }
Martin K. Petersen89078d52011-03-28 20:09:12 -04002374 if (!reference || !bdev_get_integrity(reference->bdev))
2375 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002376 /*
2377 * All component devices are integrity capable and have matching
2378 * profiles, register the common profile for the md device.
2379 */
Martin K. Petersen25520d52015-10-21 13:19:49 -04002380 blk_integrity_register(mddev->gendisk,
2381 bdev_get_integrity(reference->bdev));
2382
NeilBrown9d487392016-11-02 14:16:49 +11002383 pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
Kent Overstreetafeee512018-05-20 18:25:52 -04002384 if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) {
NeilBrown9d487392016-11-02 14:16:49 +11002385 pr_err("md: failed to create integrity pool for %s\n",
Martin K. Petersena91a2782011-03-17 11:11:05 +01002386 mdname(mddev));
2387 return -EINVAL;
2388 }
Andre Nollac5e7112009-08-03 10:59:47 +10002389 return 0;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002390}
Andre Nollac5e7112009-08-03 10:59:47 +10002391EXPORT_SYMBOL(md_integrity_register);
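/*
 * Typical usage (a sketch, not a fixed contract): a personality calls
 * this at the end of its ->run() method and treats a non-zero return
 * as fatal, so an array only starts with integrity (DIF/DIX) support
 * when every active member advertises a matching profile.
 */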
2392
Dan Williams1501efa2016-01-13 16:00:07 -08002393/*
2394 * Attempt to add an rdev, but only if it is consistent with the current
2395 * integrity profile
2396 */
2397int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
Andre Nollac5e7112009-08-03 10:59:47 +10002398{
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002399 struct blk_integrity *bi_mddev;
Dan Williams1501efa2016-01-13 16:00:07 -08002400 char name[BDEVNAME_SIZE];
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002401
2402 if (!mddev->gendisk)
Dan Williams1501efa2016-01-13 16:00:07 -08002403 return 0;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002404
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11002405 bi_mddev = blk_get_integrity(mddev->gendisk);
Andre Nollac5e7112009-08-03 10:59:47 +10002406
2407 if (!bi_mddev) /* nothing to do */
Dan Williams1501efa2016-01-13 16:00:07 -08002408 return 0;
2409
2410 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11002411 pr_err("%s: incompatible integrity profile for %s\n",
2412 mdname(mddev), bdevname(rdev->bdev, name));
Dan Williams1501efa2016-01-13 16:00:07 -08002413 return -ENXIO;
2414 }
2415
2416 return 0;
Andre Nollac5e7112009-08-03 10:59:47 +10002417}
2418EXPORT_SYMBOL(md_integrity_add_rdev);
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002419
NeilBrownf72ffdd2014-09-30 14:23:59 +10002420static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421{
NeilBrown7dd5e7c32007-02-28 20:11:35 -08002422 char b[BDEVNAME_SIZE];
NeilBrown5e55e2f2007-03-26 21:32:14 -08002423 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424
Dan Williams11e2ede2008-04-30 00:52:32 -07002425 /* prevent duplicates */
2426 if (find_rdev(mddev, rdev->bdev->bd_dev))
2427 return -EEXIST;
2428
NeilBrown97b20ef2017-04-13 08:53:48 +10002429 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2430 mddev->pers)
2431 return -EROFS;
2432
Andre Nolldd8ac332009-03-31 14:33:13 +11002433 /* make sure rdev->sectors exceeds mddev->dev_sectors */
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002434 if (!test_bit(Journal, &rdev->flags) &&
2435 rdev->sectors &&
2436 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
NeilBrowna778b732007-05-23 13:58:10 -07002437 if (mddev->pers) {
2438 /* Cannot change size, so fail
2439 * If mddev->level <= 0, then we don't care
2440 * about aligning sizes (e.g. linear)
2441 */
2442 if (mddev->level > 0)
2443 return -ENOSPC;
2444 } else
Andre Nolldd8ac332009-03-31 14:33:13 +11002445 mddev->dev_sectors = rdev->sectors;
NeilBrown2bf071b2006-01-06 00:20:55 -08002446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
2448 /* Verify rdev->desc_nr is unique.
2449 * If it is -1, assign a free number, else
2450 * check number is not in use
2451 */
NeilBrown4878e9e2014-09-25 17:00:11 +10002452 rcu_read_lock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 if (rdev->desc_nr < 0) {
2454 int choice = 0;
NeilBrown4878e9e2014-09-25 17:00:11 +10002455 if (mddev->pers)
2456 choice = mddev->raid_disks;
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002457 while (md_find_rdev_nr_rcu(mddev, choice))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 choice++;
2459 rdev->desc_nr = choice;
2460 } else {
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05002461 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
NeilBrown4878e9e2014-09-25 17:00:11 +10002462 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 return -EBUSY;
NeilBrown4878e9e2014-09-25 17:00:11 +10002464 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 }
NeilBrown4878e9e2014-09-25 17:00:11 +10002466 rcu_read_unlock();
Shaohua Lif6b6ec52015-12-21 10:51:02 +11002467 if (!test_bit(Journal, &rdev->flags) &&
2468 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11002469 pr_warn("md: %s: array is limited to %d devices\n",
2470 mdname(mddev), mddev->max_disks);
NeilBrownde01dfa2009-02-06 18:02:46 +11002471 return -EBUSY;
2472 }
NeilBrown19133a42005-11-08 21:39:35 -08002473 bdevname(rdev->bdev,b);
Rasmus Villemoes90a9bef2015-06-25 15:02:36 -07002474 strreplace(b, '/', '!');
Greg Kroah-Hartman649316b2007-12-17 23:05:35 -07002475
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 rdev->mddev = mddev;
NeilBrown9d487392016-11-02 14:16:49 +11002477 pr_debug("md: bind<%s>\n", b);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002478
Guoqing Jiang963c5552019-06-14 17:10:36 +08002479 if (mddev->raid_disks)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002480 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang963c5552019-06-14 17:10:36 +08002481
Greg Kroah-Hartmanb2d6db52007-12-17 23:05:35 -07002482 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
NeilBrown5e55e2f2007-03-26 21:32:14 -08002483 goto fail;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002484
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09002485 /* failure here is OK */
Christoph Hellwig8d652692020-11-17 08:18:55 +01002486 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
NeilBrown00bcb4a2010-06-01 19:37:23 +10002487 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07002488 rdev->sysfs_unack_badblocks =
2489 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2490 rdev->sysfs_badblocks =
2491 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
NeilBrown3c0ee632008-10-21 13:25:28 +11002492
NeilBrown4b809912008-07-21 17:05:25 +10002493 list_add_rcu(&rdev->same_set, &mddev->disks);
Tejun Heoe09b4572010-11-13 11:55:17 +01002494 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
NeilBrown4044ba52009-01-09 08:31:11 +11002495
2496 /* May as well allow recovery to be retried once */
NeilBrown53890422011-07-27 11:00:36 +10002497 mddev->recovery_disabled++;
Martin K. Petersen3f9d99c2009-03-31 14:27:02 +11002498
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 return 0;
NeilBrown5e55e2f2007-03-26 21:32:14 -08002500
2501 fail:
NeilBrown9d487392016-11-02 14:16:49 +11002502 pr_warn("md: failed to register dev-%s for %s\n",
2503 b, mdname(mddev));
NeilBrown5e55e2f2007-03-26 21:32:14 -08002504 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505}
2506
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002507static void rdev_delayed_delete(struct work_struct *ws)
NeilBrown5792a282007-04-04 19:08:18 -07002508{
NeilBrown3cb03002011-10-11 16:45:26 +11002509 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
NeilBrown5792a282007-04-04 19:08:18 -07002510 kobject_del(&rdev->kobj);
NeilBrown177a99b2008-02-06 01:39:56 -08002511 kobject_put(&rdev->kobj);
NeilBrown5792a282007-04-04 19:08:18 -07002512}
2513
NeilBrownf72ffdd2014-09-30 14:23:59 +10002514static void unbind_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515{
2516 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002517
Tejun Heo49731ba2011-01-14 18:43:57 +01002518 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
NeilBrown4b809912008-07-21 17:05:25 +10002519 list_del_rcu(&rdev->same_set);
NeilBrown9d487392016-11-02 14:16:49 +11002520 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01002521 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522 rdev->mddev = NULL;
NeilBrown86e6ffd2005-11-08 21:39:24 -08002523 sysfs_remove_link(&rdev->kobj, "block");
NeilBrown3c0ee632008-10-21 13:25:28 +11002524 sysfs_put(rdev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07002525 sysfs_put(rdev->sysfs_unack_badblocks);
2526 sysfs_put(rdev->sysfs_badblocks);
NeilBrown3c0ee632008-10-21 13:25:28 +11002527 rdev->sysfs_state = NULL;
Junxiao Bie1a86db2020-07-14 16:10:26 -07002528 rdev->sysfs_unack_badblocks = NULL;
2529 rdev->sysfs_badblocks = NULL;
NeilBrown2230dfe2011-07-28 11:31:46 +10002530 rdev->badblocks.count = 0;
NeilBrown5792a282007-04-04 19:08:18 -07002531 /* We need to delay this, otherwise we can deadlock when
NeilBrown4b809912008-07-21 17:05:25 +10002532	 * writing 'remove' to "dev/state". We also need
2533 * to delay it due to rcu usage.
NeilBrown5792a282007-04-04 19:08:18 -07002534 */
NeilBrown4b809912008-07-21 17:05:25 +10002535 synchronize_rcu();
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002536 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
NeilBrown177a99b2008-02-06 01:39:56 -08002537 kobject_get(&rdev->kobj);
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02002538 queue_work(md_rdev_misc_wq, &rdev->del_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539}
2540
2541/*
2542 * prevent the device from being mounted, repartitioned or
2543 * otherwise reused by another RAID array (or any other kernel
2544 * subsystem), by bd_claiming the device.
2545 */
NeilBrown3cb03002011-10-11 16:45:26 +11002546static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547{
2548 int err = 0;
2549 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
Tejun Heod4d77622010-11-13 11:55:18 +01002551 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
NeilBrown3cb03002011-10-11 16:45:26 +11002552 shared ? (struct md_rdev *)lock_rdev : rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 if (IS_ERR(bdev)) {
Christoph Hellwigea3edd42020-03-24 08:25:11 +01002554 pr_warn("md: could not open device unknown-block(%u,%u).\n",
2555 MAJOR(dev), MINOR(dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 return PTR_ERR(bdev);
2557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558 rdev->bdev = bdev;
2559 return err;
2560}
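/*
 * Note on the 'shared' case above (a best-effort description): the
 * exclusive-claim holder passed to blkdev_get_by_dev() is then the
 * address of lock_rdev itself, one process-wide cookie, so several
 * prospective rdevs may hold the claim at once while superblocks are
 * probed; a non-shared claim uses the rdev pointer and is exclusive
 * to that one rdev.
 */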
2561
NeilBrown3cb03002011-10-11 16:45:26 +11002562static void unlock_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563{
2564 struct block_device *bdev = rdev->bdev;
2565 rdev->bdev = NULL;
Tejun Heoe525fd82010-11-13 11:55:17 +01002566 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567}
2568
2569void md_autodetect_dev(dev_t dev);
2570
NeilBrownf72ffdd2014-09-30 14:23:59 +10002571static void export_rdev(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572{
2573 char b[BDEVNAME_SIZE];
NeilBrown403df472014-09-30 15:52:29 +10002574
NeilBrown9d487392016-11-02 14:16:49 +11002575 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
NeilBrown545c8792012-05-22 13:54:30 +10002576 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577#ifndef MODULE
NeilBrownd0fae182008-03-04 14:29:31 -08002578 if (test_bit(AutoDetected, &rdev->flags))
2579 md_autodetect_dev(rdev->bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580#endif
2581 unlock_rdev(rdev);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002582 kobject_put(&rdev->kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583}
2584
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002585void md_kick_rdev_from_array(struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586{
2587 unbind_rdev_from_array(rdev);
2588 export_rdev(rdev);
2589}
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002590EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591
NeilBrownfd01b882011-10-11 16:47:53 +11002592static void export_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593{
NeilBrown0638bb02014-09-25 17:43:47 +10002594 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595
NeilBrown0638bb02014-09-25 17:43:47 +10002596 while (!list_empty(&mddev->disks)) {
2597 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2598 same_set);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05002599 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002601 mddev->raid_disks = 0;
2602 mddev->major_version = 0;
2603}
2604
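/*
 * Decide whether the array can be marked clean.  Called with
 * mddev->lock held; the lock is dropped while writes_pending is
 * switched to atomic mode, and sync_checkers tracks how many such
 * switches are in flight so the ref is only returned to percpu mode
 * by the last one out.
 */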
NeilBrown6497709b2017-03-15 14:05:14 +11002605static bool set_in_sync(struct mddev *mddev)
2606{
Shaohua Liefa4b772017-10-18 22:08:13 -07002607 lockdep_assert_held(&mddev->lock);
NeilBrown4ad23a972017-03-15 14:05:14 +11002608 if (!mddev->in_sync) {
2609 mddev->sync_checkers++;
2610 spin_unlock(&mddev->lock);
2611 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2612 spin_lock(&mddev->lock);
2613 if (!mddev->in_sync &&
2614 percpu_ref_is_zero(&mddev->writes_pending)) {
NeilBrown6497709b2017-03-15 14:05:14 +11002615 mddev->in_sync = 1;
NeilBrown4ad23a972017-03-15 14:05:14 +11002616 /*
2617 * Ensure ->in_sync is visible before we clear
2618 * ->sync_checkers.
2619 */
NeilBrown55cc39f2017-03-15 14:05:14 +11002620 smp_mb();
NeilBrown6497709b2017-03-15 14:05:14 +11002621 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2622 sysfs_notify_dirent_safe(mddev->sysfs_state);
2623 }
NeilBrown4ad23a972017-03-15 14:05:14 +11002624 if (--mddev->sync_checkers == 0)
2625 percpu_ref_switch_to_percpu(&mddev->writes_pending);
NeilBrown6497709b2017-03-15 14:05:14 +11002626 }
2627 if (mddev->safemode == 1)
2628 mddev->safemode = 0;
2629 return mddev->in_sync;
2630}
2631
NeilBrownf72ffdd2014-09-30 14:23:59 +10002632static void sync_sbs(struct mddev *mddev, int nospares)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633{
NeilBrown42543762006-06-26 00:27:57 -07002634 /* Update each superblock (in-memory image), but
2635 * if we are allowed to, skip spares which already
2636 * have the right event counter, or have one earlier
2637 * (which would mean they aren't being marked as dirty
2638 * with the rest of the array)
2639 */
NeilBrown3cb03002011-10-11 16:45:26 +11002640 struct md_rdev *rdev;
NeilBrowndafb20f2012-03-19 12:46:39 +11002641 rdev_for_each(rdev, mddev) {
NeilBrown42543762006-06-26 00:27:57 -07002642 if (rdev->sb_events == mddev->events ||
2643 (nospares &&
2644 rdev->raid_disk < 0 &&
NeilBrown42543762006-06-26 00:27:57 -07002645 rdev->sb_events+1 == mddev->events)) {
2646 /* Don't update this superblock */
2647 rdev->sb_loaded = 2;
2648 } else {
Jonathan Brassow076f9682011-06-07 17:51:30 -05002649 sync_super(mddev, rdev);
NeilBrown42543762006-06-26 00:27:57 -07002650 rdev->sb_loaded = 1;
2651 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 }
2653}
2654
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002655static bool does_sb_need_changing(struct mddev *mddev)
2656{
2657 struct md_rdev *rdev;
2658 struct mdp_superblock_1 *sb;
2659 int role;
2660
2661 /* Find a good rdev */
2662 rdev_for_each(rdev, mddev)
2663 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2664 break;
2665
2666 /* No good device found. */
2667 if (!rdev)
2668 return false;
2669
2670 sb = page_address(rdev->sb_page);
2671 /* Check if a device has become faulty or a spare become active */
2672 rdev_for_each(rdev, mddev) {
2673 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2674 /* Device activated? */
2675 if (role == 0xffff && rdev->raid_disk >=0 &&
2676 !test_bit(Faulty, &rdev->flags))
2677 return true;
2678 /* Device turned faulty? */
2679 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2680 return true;
2681 }
2682
2683 /* Check if any mddev parameters have changed */
2684 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2685 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
Jason Yan13459212017-03-10 11:49:12 +08002686 (mddev->layout != le32_to_cpu(sb->layout)) ||
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002687 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2688 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2689 return true;
2690
2691 return false;
2692}
2693
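/*
 * Write the in-memory superblocks out to every member device.  The
 * "repeat:" label restarts the whole update when the array changed
 * underneath us; the "rewrite:" label re-issues only the writes when
 * md_super_wait() reports a failure (the failure path sets
 * MD_SB_CHANGE_DEVS again, so nothing is lost).
 */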
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002694void md_update_sb(struct mddev *mddev, int force_change)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695{
NeilBrown3cb03002011-10-11 16:45:26 +11002696 struct md_rdev *rdev;
NeilBrown06d91a52005-06-21 17:17:12 -07002697 int sync_req;
NeilBrown42543762006-06-26 00:27:57 -07002698 int nospares = 0;
NeilBrown2699b672011-07-28 11:31:47 +10002699 int any_badblocks_changed = 0;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002700 int ret = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701
NeilBrownd87f0642013-04-24 11:42:40 +10002702 if (mddev->ro) {
2703 if (force_change)
Shaohua Li29530792016-12-08 15:48:19 -08002704 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownd87f0642013-04-24 11:42:40 +10002705 return;
2706 }
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002707
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002708repeat:
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002709 if (mddev_is_clustered(mddev)) {
Shaohua Li29530792016-12-08 15:48:19 -08002710 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002711 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002712 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
Guoqing Jiang85ad1d12016-05-03 22:22:13 -04002713 nospares = 1;
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002714 ret = md_cluster_ops->metadata_update_start(mddev);
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002715		/* Has someone else updated the sb? */
2716 if (!does_sb_need_changing(mddev)) {
Guoqing Jiang23b63f92015-10-12 17:21:30 +08002717 if (ret == 0)
2718 md_cluster_ops->metadata_update_cancel(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08002719 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2720 BIT(MD_SB_CHANGE_DEVS) |
2721 BIT(MD_SB_CHANGE_CLEAN));
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05002722 return;
2723 }
2724 }
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002725
NeilBrowndb0505d2017-10-17 16:18:36 +11002726 /*
2727 * First make sure individual recovery_offsets are correct
2728 * curr_resync_completed can only be used during recovery.
2729 * During reshape/resync it might use array-addresses rather
2730	 * than device addresses.
2731 */
NeilBrowndafb20f2012-03-19 12:46:39 +11002732 rdev_for_each(rdev, mddev) {
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002733 if (rdev->raid_disk >= 0 &&
2734 mddev->delta_disks >= 0 &&
NeilBrowndb0505d2017-10-17 16:18:36 +11002735 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2736 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2737 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002738 !test_bit(Journal, &rdev->flags) &&
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002739 !test_bit(In_sync, &rdev->flags) &&
2740 mddev->curr_resync_completed > rdev->recovery_offset)
2741 rdev->recovery_offset = mddev->curr_resync_completed;
2742
NeilBrownf72ffdd2014-09-30 14:23:59 +10002743 }
Dan Williamsbd52b742010-08-30 17:33:33 +10002744 if (!mddev->persistent) {
Shaohua Li29530792016-12-08 15:48:19 -08002745 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2746 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownde393cd2011-07-28 11:31:48 +10002747 if (!mddev->external) {
Shaohua Li29530792016-12-08 15:48:19 -08002748 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrowndafb20f2012-03-19 12:46:39 +11002749 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002750 if (rdev->badblocks.changed) {
NeilBrownd0962932012-03-19 12:46:41 +11002751 rdev->badblocks.changed = 0;
Vishal Vermafc974ee2015-12-24 19:20:34 -07002752 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002753 md_error(mddev, rdev);
2754 }
2755 clear_bit(Blocked, &rdev->flags);
2756 clear_bit(BlockedBadBlocks, &rdev->flags);
2757 wake_up(&rdev->blocked_wait);
2758 }
2759 }
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002760 wake_up(&mddev->sb_wait);
2761 return;
2762 }
2763
NeilBrown85572d72014-12-15 12:56:56 +11002764 spin_lock(&mddev->lock);
NeilBrown84692192006-08-27 01:23:49 -07002765
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11002766 mddev->utime = ktime_get_real_seconds();
NeilBrown3a3a5dd2010-08-16 18:09:31 +10002767
Shaohua Li29530792016-12-08 15:48:19 -08002768 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002769 force_change = 1;
Shaohua Li29530792016-12-08 15:48:19 -08002770 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
NeilBrown850b2b422006-10-03 01:15:46 -07002771		/* just a clean <-> dirty transition, possibly leave spares alone,
2772		 * though if the events count ends up with the wrong even/odd
2773		 * parity, we will have to update the spares after all
2774 */
2775 nospares = 1;
2776 if (force_change)
2777 nospares = 0;
2778 if (mddev->degraded)
NeilBrown84692192006-08-27 01:23:49 -07002779 /* If the array is degraded, then skipping spares is both
2780 * dangerous and fairly pointless.
2781 * Dangerous because a device that was removed from the array
2782	 * might have an event_count that still looks up-to-date,
2783 * so it can be re-added without a resync.
2784 * Pointless because if there are any spares to skip,
2785 * then a recovery will happen and soon that array won't
2786 * be degraded any more and the spare can go back to sleep then.
2787 */
NeilBrown850b2b422006-10-03 01:15:46 -07002788 nospares = 0;
NeilBrown84692192006-08-27 01:23:49 -07002789
NeilBrown06d91a52005-06-21 17:17:12 -07002790 sync_req = mddev->in_sync;
NeilBrown42543762006-06-26 00:27:57 -07002791
2792 /* If this is just a dirty<->clean transition, and the array is clean
2793 * and 'events' is odd, we can roll back to the previous clean state */
NeilBrown850b2b422006-10-03 01:15:46 -07002794 if (nospares
NeilBrown42543762006-06-26 00:27:57 -07002795 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
NeilBrowna8707c02010-05-18 09:28:43 +10002796 && mddev->can_decrease_events
2797 && mddev->events != 1) {
NeilBrown42543762006-06-26 00:27:57 -07002798 mddev->events--;
NeilBrowna8707c02010-05-18 09:28:43 +10002799 mddev->can_decrease_events = 0;
2800 } else {
NeilBrown42543762006-06-26 00:27:57 -07002801 /* otherwise we have to go forward and ... */
2802 mddev->events ++;
NeilBrowna8707c02010-05-18 09:28:43 +10002803 mddev->can_decrease_events = nospares;
NeilBrown42543762006-06-26 00:27:57 -07002804 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805
NeilBrown403df472014-09-30 15:52:29 +10002806 /*
2807 * This 64-bit counter should never wrap.
2808	 * Either we are in ~1 trillion A.C., assuming
2809 * 1 reboot per second, or we have a bug...
2810 */
2811 WARN_ON(mddev->events == 0);
NeilBrown2699b672011-07-28 11:31:47 +10002812
NeilBrowndafb20f2012-03-19 12:46:39 +11002813 rdev_for_each(rdev, mddev) {
NeilBrown2699b672011-07-28 11:31:47 +10002814 if (rdev->badblocks.changed)
2815 any_badblocks_changed++;
NeilBrownde393cd2011-07-28 11:31:48 +10002816 if (test_bit(Faulty, &rdev->flags))
2817 set_bit(FaultRecorded, &rdev->flags);
2818 }
NeilBrown2699b672011-07-28 11:31:47 +10002819
NeilBrowne6910632008-02-06 01:39:51 -08002820 sync_sbs(mddev, nospares);
NeilBrown85572d72014-12-15 12:56:56 +11002821 spin_unlock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822
NeilBrown36a4e1f2011-10-07 14:23:17 +11002823 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2824 mdname(mddev), mddev->in_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825
Shaohua Li504634f2016-11-18 09:44:08 -08002826 if (mddev->queue)
2827 blk_add_trace_msg(mddev->queue, "md md_update_sb");
NeilBrown46533ff2016-11-18 16:16:11 +11002828rewrite:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002829 md_bitmap_update_sb(mddev->bitmap);
NeilBrowndafb20f2012-03-19 12:46:39 +11002830 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 char b[BDEVNAME_SIZE];
NeilBrown36a4e1f2011-10-07 14:23:17 +11002832
NeilBrown42543762006-06-26 00:27:57 -07002833 if (rdev->sb_loaded != 1)
2834 continue; /* no noise on spare devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835
NeilBrownf4667222013-12-09 12:04:56 +11002836 if (!test_bit(Faulty, &rdev->flags)) {
NeilBrown7bfa19f2005-06-21 17:17:28 -07002837 md_super_write(mddev,rdev,
Andre Noll0f420352008-07-11 22:02:23 +10002838 rdev->sb_start, rdev->sb_size,
NeilBrown7bfa19f2005-06-21 17:17:28 -07002839 rdev->sb_page);
NeilBrown36a4e1f2011-10-07 14:23:17 +11002840 pr_debug("md: (write) %s's sb offset: %llu\n",
2841 bdevname(rdev->bdev, b),
2842 (unsigned long long)rdev->sb_start);
NeilBrown42543762006-06-26 00:27:57 -07002843 rdev->sb_events = mddev->events;
NeilBrown2699b672011-07-28 11:31:47 +10002844 if (rdev->badblocks.size) {
2845 md_super_write(mddev, rdev,
2846 rdev->badblocks.sector,
2847 rdev->badblocks.size << 9,
2848 rdev->bb_page);
2849 rdev->badblocks.size = 0;
2850 }
NeilBrown7bfa19f2005-06-21 17:17:28 -07002851
NeilBrownf4667222013-12-09 12:04:56 +11002852 } else
NeilBrown36a4e1f2011-10-07 14:23:17 +11002853 pr_debug("md: %s (skipping faulty)\n",
2854 bdevname(rdev->bdev, b));
Andrei Warkentind70ed2e2011-10-18 12:16:48 +11002855
NeilBrown7bfa19f2005-06-21 17:17:28 -07002856 if (mddev->level == LEVEL_MULTIPATH)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 /* only need to write one superblock... */
2858 break;
2859 }
NeilBrown46533ff2016-11-18 16:16:11 +11002860 if (md_super_wait(mddev) < 0)
2861 goto rewrite;
Shaohua Li29530792016-12-08 15:48:19 -08002862 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
NeilBrown7bfa19f2005-06-21 17:17:28 -07002863
Guoqing Jiang2c97cf12016-05-02 11:33:09 -04002864 if (mddev_is_clustered(mddev) && ret == 0)
2865 md_cluster_ops->metadata_update_finish(mddev);
2866
NeilBrown850b2b422006-10-03 01:15:46 -07002867 if (mddev->in_sync != sync_req ||
Shaohua Li29530792016-12-08 15:48:19 -08002868 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2869 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
NeilBrown06d91a52005-06-21 17:17:12 -07002870 /* have to write it out again */
NeilBrown06d91a52005-06-21 17:17:12 -07002871 goto repeat;
NeilBrown3d310eb2005-06-21 17:17:26 -07002872 wake_up(&mddev->sb_wait);
NeilBrownacb180b2009-04-14 16:28:34 +10002873 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
Junxiao Bie1a86db2020-07-14 16:10:26 -07002874 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown06d91a52005-06-21 17:17:12 -07002875
NeilBrowndafb20f2012-03-19 12:46:39 +11002876 rdev_for_each(rdev, mddev) {
NeilBrownde393cd2011-07-28 11:31:48 +10002877 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2878 clear_bit(Blocked, &rdev->flags);
2879
2880 if (any_badblocks_changed)
Vishal Vermafc974ee2015-12-24 19:20:34 -07002881 ack_all_badblocks(&rdev->badblocks);
NeilBrownde393cd2011-07-28 11:31:48 +10002882 clear_bit(BlockedBadBlocks, &rdev->flags);
2883 wake_up(&rdev->blocked_wait);
2884 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885}
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05002886EXPORT_SYMBOL(md_update_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002888static int add_bound_rdev(struct md_rdev *rdev)
2889{
2890 struct mddev *mddev = rdev->mddev;
2891 int err = 0;
Shaohua Li87d4d912016-01-06 14:37:14 -08002892 bool add_journal = test_bit(Journal, &rdev->flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002893
Shaohua Li87d4d912016-01-06 14:37:14 -08002894 if (!mddev->pers->hot_remove_disk || add_journal) {
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002895 /* If there is hot_add_disk but no hot_remove_disk
2896 * then added disks for geometry changes,
2897 * and should be added immediately.
2898 */
2899 super_types[mddev->major_version].
2900 validate_super(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002901 if (add_journal)
2902 mddev_suspend(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002903 err = mddev->pers->hot_add_disk(mddev, rdev);
Shaohua Li87d4d912016-01-06 14:37:14 -08002904 if (add_journal)
2905 mddev_resume(mddev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002906 if (err) {
Guoqing Jiangdb767672016-06-02 23:32:05 -04002907 md_kick_rdev_from_array(rdev);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002908 return err;
2909 }
2910 }
2911 sysfs_notify_dirent_safe(rdev->sysfs_state);
2912
Shaohua Li29530792016-12-08 15:48:19 -08002913 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05002914 if (mddev->degraded)
2915 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2916 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2917 md_new_event(mddev);
2918 md_wakeup_thread(mddev->thread);
2919 return 0;
2920}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
Andre Noll7f6ce762008-03-23 18:34:54 +01002922/* words written to sysfs files may, or may not, be \n terminated.
NeilBrownbce74da2006-01-06 00:20:41 -08002923 * We want to accept either case. For this we use cmd_match.
2924 */
2925static int cmd_match(const char *cmd, const char *str)
2926{
2927 /* See if cmd, written into a sysfs file, matches
2928 * str. They must either be the same, or cmd can
2929 * have a trailing newline
2930 */
2931 while (*cmd && *str && *cmd == *str) {
2932 cmd++;
2933 str++;
2934 }
2935 if (*cmd == '\n')
2936 cmd++;
2937 if (*str || *cmd)
2938 return 0;
2939 return 1;
2940}
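/*
 * Examples: cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while
 * cmd_match("fault", "faulty") and cmd_match("faulty2", "faulty")
 * return 0.
 */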
2941
NeilBrown86e6ffd2005-11-08 21:39:24 -08002942struct rdev_sysfs_entry {
2943 struct attribute attr;
NeilBrown3cb03002011-10-11 16:45:26 +11002944 ssize_t (*show)(struct md_rdev *, char *);
2945 ssize_t (*store)(struct md_rdev *, const char *, size_t);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002946};
2947
2948static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002949state_show(struct md_rdev *rdev, char *page)
NeilBrown86e6ffd2005-11-08 21:39:24 -08002950{
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002951 char *sep = ",";
NeilBrown20a49ff2008-02-06 01:39:57 -08002952 size_t len = 0;
Mark Rutland6aa7de02017-10-23 14:07:29 -07002953 unsigned long flags = READ_ONCE(rdev->flags);
NeilBrown86e6ffd2005-11-08 21:39:24 -08002954
NeilBrown758bfc82014-12-15 12:56:59 +11002955 if (test_bit(Faulty, &flags) ||
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02002956 (!test_bit(ExternalBbl, &flags) &&
2957 rdev->badblocks.unacked_exist))
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002958 len += sprintf(page+len, "faulty%s", sep);
2959 if (test_bit(In_sync, &flags))
2960 len += sprintf(page+len, "in_sync%s", sep);
2961 if (test_bit(Journal, &flags))
2962 len += sprintf(page+len, "journal%s", sep);
2963 if (test_bit(WriteMostly, &flags))
2964 len += sprintf(page+len, "write_mostly%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002965 if (test_bit(Blocked, &flags) ||
NeilBrown52c64152011-12-08 16:22:48 +11002966 (rdev->badblocks.unacked_exist
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002967 && !test_bit(Faulty, &flags)))
2968 len += sprintf(page+len, "blocked%s", sep);
NeilBrown758bfc82014-12-15 12:56:59 +11002969 if (!test_bit(Faulty, &flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07002970 !test_bit(Journal, &flags) &&
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002971 !test_bit(In_sync, &flags))
2972 len += sprintf(page+len, "spare%s", sep);
2973 if (test_bit(WriteErrorSeen, &flags))
2974 len += sprintf(page+len, "write_error%s", sep);
2975 if (test_bit(WantReplacement, &flags))
2976 len += sprintf(page+len, "want_replacement%s", sep);
2977 if (test_bit(Replacement, &flags))
2978 len += sprintf(page+len, "replacement%s", sep);
2979 if (test_bit(ExternalBbl, &flags))
2980 len += sprintf(page+len, "external_bbl%s", sep);
NeilBrown688834e2016-11-18 16:16:11 +11002981 if (test_bit(FailFast, &flags))
2982 len += sprintf(page+len, "failfast%s", sep);
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02002983
2984 if (len)
2985 len -= strlen(sep);
NeilBrown2d78f8c2011-12-23 10:17:51 +11002986
NeilBrown86e6ffd2005-11-08 21:39:24 -08002987 return len+sprintf(page+len, "\n");
2988}
2989
NeilBrown45dc2de2006-06-26 00:27:58 -07002990static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11002991state_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown45dc2de2006-06-26 00:27:58 -07002992{
2993 /* can write
NeilBrownde393cd2011-07-28 11:31:48 +10002994 * faulty - simulates an error
NeilBrown45dc2de2006-06-26 00:27:58 -07002995 * remove - disconnects the device
NeilBrownf6556752006-06-26 00:28:01 -07002996 * writemostly - sets write_mostly
2997 * -writemostly - clears write_mostly
NeilBrownde393cd2011-07-28 11:31:48 +10002998 * blocked - sets the Blocked flag
2999 * -blocked - clears the Blocked and possibly simulates an error
NeilBrown6d56e272009-04-14 12:01:57 +10003000 * insync - sets Insync providing device isn't active
NeilBrownf4667222013-12-09 12:04:56 +11003001 * -insync - clear Insync for a device with a slot assigned,
3002 * so that it gets rebuilt based on bitmap
NeilBrownd7a9d442011-07-28 11:31:48 +10003003 * write_error - sets WriteErrorSeen
3004 * -write_error - clears WriteErrorSeen
NeilBrown688834e2016-11-18 16:16:11 +11003005 * {,-}failfast - set/clear FailFast
NeilBrown45dc2de2006-06-26 00:27:58 -07003006 */
3007 int err = -EINVAL;
3008 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3009 md_error(rdev->mddev, rdev);
NeilBrown5ef56c82011-08-25 14:42:51 +10003010 if (test_bit(Faulty, &rdev->flags))
3011 err = 0;
3012 else
3013 err = -EBUSY;
NeilBrown45dc2de2006-06-26 00:27:58 -07003014 } else if (cmd_match(buf, "remove")) {
Shaohua Li5d881782016-07-28 09:06:34 -07003015 if (rdev->mddev->pers) {
3016 clear_bit(Blocked, &rdev->flags);
3017 remove_and_add_spares(rdev->mddev, rdev);
3018 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003019 if (rdev->raid_disk >= 0)
3020 err = -EBUSY;
3021 else {
NeilBrownfd01b882011-10-11 16:47:53 +11003022 struct mddev *mddev = rdev->mddev;
NeilBrown45dc2de2006-06-26 00:27:58 -07003023 err = 0;
Guoqing Jianga9720902015-10-12 17:21:27 +08003024 if (mddev_is_clustered(mddev))
3025 err = md_cluster_ops->remove_disk(mddev, rdev);
3026
3027 if (err == 0) {
3028 md_kick_rdev_from_array(rdev);
NeilBrown060b0682016-11-04 16:46:03 +11003029 if (mddev->pers) {
Shaohua Li29530792016-12-08 15:48:19 -08003030 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11003031 md_wakeup_thread(mddev->thread);
3032 }
Guoqing Jianga9720902015-10-12 17:21:27 +08003033 md_new_event(mddev);
3034 }
NeilBrown45dc2de2006-06-26 00:27:58 -07003035 }
NeilBrownf6556752006-06-26 00:28:01 -07003036 } else if (cmd_match(buf, "writemostly")) {
3037 set_bit(WriteMostly, &rdev->flags);
Guoqing Jiang404659c2019-12-23 10:48:53 +01003038 mddev_create_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003039 err = 0;
3040 } else if (cmd_match(buf, "-writemostly")) {
Guoqing Jiang11d3a9f2019-12-23 10:48:55 +01003041 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
NeilBrownf6556752006-06-26 00:28:01 -07003042 clear_bit(WriteMostly, &rdev->flags);
3043 err = 0;
Dan Williams6bfe0b42008-04-30 00:52:32 -07003044 } else if (cmd_match(buf, "blocked")) {
3045 set_bit(Blocked, &rdev->flags);
3046 err = 0;
3047 } else if (cmd_match(buf, "-blocked")) {
NeilBrownde393cd2011-07-28 11:31:48 +10003048 if (!test_bit(Faulty, &rdev->flags) &&
Tomasz Majchrzakdcbcb482016-10-21 16:27:08 +02003049 !test_bit(ExternalBbl, &rdev->flags) &&
NeilBrown7da64a02011-08-30 16:20:17 +10003050 rdev->badblocks.unacked_exist) {
NeilBrownde393cd2011-07-28 11:31:48 +10003051 /* metadata handler doesn't understand badblocks,
3052 * so we need to fail the device
3053 */
3054 md_error(rdev->mddev, rdev);
3055 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003056 clear_bit(Blocked, &rdev->flags);
NeilBrownde393cd2011-07-28 11:31:48 +10003057 clear_bit(BlockedBadBlocks, &rdev->flags);
Dan Williams6bfe0b42008-04-30 00:52:32 -07003058 wake_up(&rdev->blocked_wait);
3059 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3060 md_wakeup_thread(rdev->mddev->thread);
3061
3062 err = 0;
NeilBrown6d56e272009-04-14 12:01:57 +10003063 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3064 set_bit(In_sync, &rdev->flags);
3065 err = 0;
NeilBrown688834e2016-11-18 16:16:11 +11003066 } else if (cmd_match(buf, "failfast")) {
3067 set_bit(FailFast, &rdev->flags);
3068 err = 0;
3069 } else if (cmd_match(buf, "-failfast")) {
3070 clear_bit(FailFast, &rdev->flags);
3071 err = 0;
Shaohua Lif2076e72015-10-08 21:54:12 -07003072 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3073 !test_bit(Journal, &rdev->flags)) {
NeilBrowne1960f82014-09-30 15:24:25 +10003074 if (rdev->mddev->pers == NULL) {
3075 clear_bit(In_sync, &rdev->flags);
3076 rdev->saved_raid_disk = rdev->raid_disk;
3077 rdev->raid_disk = -1;
3078 err = 0;
3079 }
NeilBrownd7a9d442011-07-28 11:31:48 +10003080 } else if (cmd_match(buf, "write_error")) {
3081 set_bit(WriteErrorSeen, &rdev->flags);
3082 err = 0;
3083 } else if (cmd_match(buf, "-write_error")) {
3084 clear_bit(WriteErrorSeen, &rdev->flags);
3085 err = 0;
NeilBrown2d78f8c2011-12-23 10:17:51 +11003086 } else if (cmd_match(buf, "want_replacement")) {
3087 /* Any non-spare device that is not a replacement can
3088 * become want_replacement at any time, but we then need to
3089 * check if recovery is needed.
3090 */
3091 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07003092 !test_bit(Journal, &rdev->flags) &&
NeilBrown2d78f8c2011-12-23 10:17:51 +11003093 !test_bit(Replacement, &rdev->flags))
3094 set_bit(WantReplacement, &rdev->flags);
3095 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3096 md_wakeup_thread(rdev->mddev->thread);
3097 err = 0;
3098 } else if (cmd_match(buf, "-want_replacement")) {
3099 /* Clearing 'want_replacement' is always allowed.
3100 * Once replacements starts it is too late though.
3101	 * Once replacement starts it is too late though.
3102 err = 0;
3103 clear_bit(WantReplacement, &rdev->flags);
3104 } else if (cmd_match(buf, "replacement")) {
3105 /* Can only set a device as a replacement when array has not
3106 * yet been started. Once running, replacement is automatic
3107 * from spares, or by assigning 'slot'.
3108 */
3109 if (rdev->mddev->pers)
3110 err = -EBUSY;
3111 else {
3112 set_bit(Replacement, &rdev->flags);
3113 err = 0;
3114 }
3115 } else if (cmd_match(buf, "-replacement")) {
3116 /* Similarly, can only clear Replacement before start */
3117 if (rdev->mddev->pers)
3118 err = -EBUSY;
3119 else {
3120 clear_bit(Replacement, &rdev->flags);
3121 err = 0;
3122 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003123 } else if (cmd_match(buf, "re-add")) {
Yufen Yuee37e622019-04-02 14:22:14 +08003124 if (!rdev->mddev->pers)
3125 err = -EINVAL;
3126 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3127 rdev->saved_raid_disk >= 0) {
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05003128 /* clear_bit is performed _after_ all the devices
3129 * have their local Faulty bit cleared. If any writes
3130 * happen in the meantime in the local node, they
3131 * will land in the local bitmap, which will be synced
3132 * by this node eventually
3133 */
3134 if (!mddev_is_clustered(rdev->mddev) ||
3135 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3136 clear_bit(Faulty, &rdev->flags);
3137 err = add_bound_rdev(rdev);
3138 }
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05003139 } else
3140 err = -EBUSY;
Tomasz Majchrzak35b785f2016-10-21 16:26:57 +02003141 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3142 set_bit(ExternalBbl, &rdev->flags);
3143 rdev->badblocks.shift = 0;
3144 err = 0;
3145 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3146 clear_bit(ExternalBbl, &rdev->flags);
3147 err = 0;
NeilBrown45dc2de2006-06-26 00:27:58 -07003148 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10003149 if (!err)
3150 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrown45dc2de2006-06-26 00:27:58 -07003151 return err ? err : len;
3152}
NeilBrown80ca3a42006-07-10 04:44:18 -07003153static struct rdev_sysfs_entry rdev_state =
NeilBrown750f1992014-09-30 08:53:05 +10003154__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
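/*
 * Usage sketch (array and device names hypothetical):
 *
 *	echo want_replacement > /sys/block/md0/md/dev-sdb1/state
 *	echo -blocked > /sys/block/md0/md/dev-sdb1/state
 *	cat /sys/block/md0/md/dev-sdb1/state	-> "in_sync,write_mostly"
 */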
NeilBrown86e6ffd2005-11-08 21:39:24 -08003155
3156static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003157errors_show(struct md_rdev *rdev, char *page)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003158{
3159 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3160}
3161
3162static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003163errors_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown4dbcdc72006-01-06 00:20:52 -08003164{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003165 unsigned int n;
3166 int rv;
3167
3168 rv = kstrtouint(buf, 10, &n);
3169 if (rv < 0)
3170 return rv;
3171 atomic_set(&rdev->corrected_errors, n);
3172 return len;
NeilBrown4dbcdc72006-01-06 00:20:52 -08003173}
3174static struct rdev_sysfs_entry rdev_errors =
NeilBrown80ca3a42006-07-10 04:44:18 -07003175__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
NeilBrown4dbcdc72006-01-06 00:20:52 -08003176
NeilBrown014236d2006-01-06 00:20:55 -08003177static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003178slot_show(struct md_rdev *rdev, char *page)
NeilBrown014236d2006-01-06 00:20:55 -08003179{
Shaohua Lif2076e72015-10-08 21:54:12 -07003180 if (test_bit(Journal, &rdev->flags))
3181 return sprintf(page, "journal\n");
3182 else if (rdev->raid_disk < 0)
NeilBrown014236d2006-01-06 00:20:55 -08003183 return sprintf(page, "none\n");
3184 else
3185 return sprintf(page, "%d\n", rdev->raid_disk);
3186}
3187
3188static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003189slot_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown014236d2006-01-06 00:20:55 -08003190{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003191 int slot;
NeilBrownc303da62008-02-06 01:39:51 -08003192 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003193
Shaohua Lif2076e72015-10-08 21:54:12 -07003194 if (test_bit(Journal, &rdev->flags))
3195 return -EBUSY;
NeilBrown014236d2006-01-06 00:20:55 -08003196 if (strncmp(buf, "none", 4)==0)
3197 slot = -1;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03003198 else {
3199 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3200 if (err < 0)
3201 return err;
3202 }
Neil Brown6c2fce22008-06-28 08:31:31 +10003203 if (rdev->mddev->pers && slot == -1) {
NeilBrownc303da62008-02-06 01:39:51 -08003204		/* Setting 'slot' on an active array also requires
3205		 * updating the 'rd%d' link, and communicating
3206		 * with the personality via ->hot_*_disk.
3207 * For now we only support removing
3208 * failed/spare devices. This normally happens automatically,
3209 * but not when the metadata is externally managed.
3210 */
NeilBrownc303da62008-02-06 01:39:51 -08003211 if (rdev->raid_disk == -1)
3212 return -EEXIST;
3213 /* personality does all needed checks */
Namhyung Kim01393f32011-06-09 11:42:54 +10003214 if (rdev->mddev->pers->hot_remove_disk == NULL)
NeilBrownc303da62008-02-06 01:39:51 -08003215 return -EINVAL;
NeilBrown746d3202013-04-24 11:42:41 +10003216 clear_bit(Blocked, &rdev->flags);
3217 remove_and_add_spares(rdev->mddev, rdev);
3218 if (rdev->raid_disk >= 0)
3219 return -EBUSY;
NeilBrownc303da62008-02-06 01:39:51 -08003220 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3221 md_wakeup_thread(rdev->mddev->thread);
Neil Brown6c2fce22008-06-28 08:31:31 +10003222 } else if (rdev->mddev->pers) {
Neil Brown6c2fce22008-06-28 08:31:31 +10003223 /* Activating a spare .. or possibly reactivating
NeilBrown6d56e272009-04-14 12:01:57 +10003224 * if we ever get bitmaps working here.
Neil Brown6c2fce22008-06-28 08:31:31 +10003225 */
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003226 int err;
Neil Brown6c2fce22008-06-28 08:31:31 +10003227
3228 if (rdev->raid_disk != -1)
3229 return -EBUSY;
3230
NeilBrownc6751b22011-02-02 11:57:13 +11003231 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3232 return -EBUSY;
3233
Neil Brown6c2fce22008-06-28 08:31:31 +10003234 if (rdev->mddev->pers->hot_add_disk == NULL)
3235 return -EINVAL;
3236
NeilBrownba1b41b2011-01-14 09:14:34 +11003237 if (slot >= rdev->mddev->raid_disks &&
3238 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3239 return -ENOSPC;
3240
Neil Brown6c2fce22008-06-28 08:31:31 +10003241 rdev->raid_disk = slot;
3242 if (test_bit(In_sync, &rdev->flags))
3243 rdev->saved_raid_disk = slot;
3244 else
3245 rdev->saved_raid_disk = -1;
NeilBrownd30519f2011-10-18 12:13:47 +11003246 clear_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11003247 clear_bit(Bitmap_sync, &rdev->flags);
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02003248 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
Goldwyn Rodriguescb01c542015-12-18 15:19:16 +11003249 if (err) {
3250 rdev->raid_disk = -1;
3251 return err;
3252 } else
3253 sysfs_notify_dirent_safe(rdev->sysfs_state);
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09003254		/* failure here is OK */
3255		sysfs_link_rdev(rdev->mddev, rdev);
Neil Brown6c2fce22008-06-28 08:31:31 +10003256 /* don't wakeup anyone, leave that to userspace. */
NeilBrownc303da62008-02-06 01:39:51 -08003257 } else {
NeilBrownba1b41b2011-01-14 09:14:34 +11003258 if (slot >= rdev->mddev->raid_disks &&
3259 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
NeilBrownc303da62008-02-06 01:39:51 -08003260 return -ENOSPC;
3261 rdev->raid_disk = slot;
3262 /* assume it is working */
NeilBrownc5d79ad2008-02-06 01:39:54 -08003263 clear_bit(Faulty, &rdev->flags);
3264 clear_bit(WriteMostly, &rdev->flags);
NeilBrownc303da62008-02-06 01:39:51 -08003265 set_bit(In_sync, &rdev->flags);
NeilBrown00bcb4a2010-06-01 19:37:23 +10003266 sysfs_notify_dirent_safe(rdev->sysfs_state);
NeilBrownc303da62008-02-06 01:39:51 -08003267 }
NeilBrown014236d2006-01-06 00:20:55 -08003268 return len;
3269}
3270
NeilBrown014236d2006-01-06 00:20:55 -08003271static struct rdev_sysfs_entry rdev_slot =
NeilBrown80ca3a42006-07-10 04:44:18 -07003272__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
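/*
 * Usage sketch: writing "none" to the slot file hot-removes a failed
 * or spare device from a running array; writing a number hot-adds the
 * device into that raid slot, or merely records the assignment when
 * the array has not been started yet.
 */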
NeilBrown014236d2006-01-06 00:20:55 -08003273
NeilBrown93c8cad2006-01-06 00:20:56 -08003274static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003275offset_show(struct md_rdev *rdev, char *page)
NeilBrown93c8cad2006-01-06 00:20:56 -08003276{
Andrew Morton6961ece2006-01-06 00:20:59 -08003277 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
NeilBrown93c8cad2006-01-06 00:20:56 -08003278}
3279
3280static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003281offset_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown93c8cad2006-01-06 00:20:56 -08003282{
NeilBrownc6563a82012-05-21 09:27:00 +10003283 unsigned long long offset;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003284 if (kstrtoull(buf, 10, &offset) < 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003285 return -EINVAL;
Neil Brown8ed0a522008-06-28 08:31:29 +10003286 if (rdev->mddev->pers && rdev->raid_disk >= 0)
NeilBrown93c8cad2006-01-06 00:20:56 -08003287 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003288 if (rdev->sectors && rdev->mddev->external)
NeilBrownc5d79ad2008-02-06 01:39:54 -08003289 /* Must set offset before size, so overlap checks
3290 * can be sane */
3291 return -EBUSY;
NeilBrown93c8cad2006-01-06 00:20:56 -08003292 rdev->data_offset = offset;
NeilBrown25f7fd42012-07-19 15:59:18 +10003293 rdev->new_data_offset = offset;
NeilBrown93c8cad2006-01-06 00:20:56 -08003294 return len;
3295}
3296
3297static struct rdev_sysfs_entry rdev_offset =
NeilBrown80ca3a42006-07-10 04:44:18 -07003298__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
NeilBrown93c8cad2006-01-06 00:20:56 -08003299
NeilBrownc6563a82012-05-21 09:27:00 +10003300static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3301{
3302 return sprintf(page, "%llu\n",
3303 (unsigned long long)rdev->new_data_offset);
3304}
3305
3306static ssize_t new_offset_store(struct md_rdev *rdev,
3307 const char *buf, size_t len)
3308{
3309 unsigned long long new_offset;
3310 struct mddev *mddev = rdev->mddev;
3311
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003312 if (kstrtoull(buf, 10, &new_offset) < 0)
NeilBrownc6563a82012-05-21 09:27:00 +10003313 return -EINVAL;
3314
NeilBrownf851b602014-12-11 10:02:10 +11003315 if (mddev->sync_thread ||
3316 test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
NeilBrownc6563a82012-05-21 09:27:00 +10003317 return -EBUSY;
3318 if (new_offset == rdev->data_offset)
3319 /* reset is always permitted */
3320 ;
3321 else if (new_offset > rdev->data_offset) {
3322 /* must not push array size beyond rdev_sectors */
3323 if (new_offset - rdev->data_offset
3324 + mddev->dev_sectors > rdev->sectors)
3325 return -E2BIG;
3326 }
3327 /* Metadata worries about other space details. */
3328
3329 /* decreasing the offset is inconsistent with a backwards
3330 * reshape.
3331 */
3332 if (new_offset < rdev->data_offset &&
3333 mddev->reshape_backwards)
3334 return -EINVAL;
3335 /* Increasing offset is inconsistent with forwards
3336 * reshape. reshape_direction should be set to
3337 * 'backwards' first.
3338 */
3339 if (new_offset > rdev->data_offset &&
3340 !mddev->reshape_backwards)
3341 return -EINVAL;
3342
3343 if (mddev->pers && mddev->persistent &&
3344 !super_types[mddev->major_version]
3345 .allow_new_offset(rdev, new_offset))
3346 return -E2BIG;
3347 rdev->new_data_offset = new_offset;
3348 if (new_offset > rdev->data_offset)
3349 mddev->reshape_backwards = 1;
3350 else if (new_offset < rdev->data_offset)
3351 mddev->reshape_backwards = 0;
3352
3353 return len;
3354}
3355static struct rdev_sysfs_entry rdev_new_offset =
3356__ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3357
NeilBrown83303b62006-01-06 00:21:06 -08003358static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003359rdev_size_show(struct md_rdev *rdev, char *page)
NeilBrown83303b62006-01-06 00:21:06 -08003360{
Andre Nolldd8ac332009-03-31 14:33:13 +11003361 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
NeilBrown83303b62006-01-06 00:21:06 -08003362}
3363
NeilBrownc5d79ad2008-02-06 01:39:54 -08003364static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
3365{
3366 /* check if two start/length pairs overlap */
3367 if (s1+l1 <= s2)
3368 return 0;
3369 if (s2+l2 <= s1)
3370 return 0;
3371 return 1;
3372}
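/*
 * Example: overlaps(0, 100, 100, 50) == 0 (the ranges only touch),
 * while overlaps(0, 100, 99, 50) == 1.
 */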
3373
Dan Williamsb522adc2009-03-31 15:00:31 +11003374static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3375{
3376 unsigned long long blocks;
3377 sector_t new;
3378
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003379 if (kstrtoull(buf, 10, &blocks) < 0)
Dan Williamsb522adc2009-03-31 15:00:31 +11003380 return -EINVAL;
3381
3382 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3383 return -EINVAL; /* sector conversion overflow */
3384
3385 new = blocks * 2;
3386 if (new != blocks * 2)
3387 return -EINVAL; /* unsigned long long to sector_t overflow */
3388
3389 *sectors = new;
3390 return 0;
3391}
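/*
 * Example: "1048576" (1 GiB expressed in 1K blocks) stores
 * *sectors = 2097152.  Input with the top bit set, or whose doubling
 * would not fit in sector_t, is rejected with -EINVAL.
 */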
3392
NeilBrown83303b62006-01-06 00:21:06 -08003393static ssize_t
NeilBrown3cb03002011-10-11 16:45:26 +11003394rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
NeilBrown83303b62006-01-06 00:21:06 -08003395{
NeilBrownfd01b882011-10-11 16:47:53 +11003396 struct mddev *my_mddev = rdev->mddev;
Andre Nolldd8ac332009-03-31 14:33:13 +11003397 sector_t oldsectors = rdev->sectors;
Dan Williamsb522adc2009-03-31 15:00:31 +11003398 sector_t sectors;
NeilBrown27c529b2008-03-04 14:29:33 -08003399
Shaohua Lif2076e72015-10-08 21:54:12 -07003400 if (test_bit(Journal, &rdev->flags))
3401 return -EBUSY;
Dan Williamsb522adc2009-03-31 15:00:31 +11003402 if (strict_blocks_to_sectors(buf, &sectors) < 0)
Neil Brownd7027452008-07-12 10:37:50 +10003403 return -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10003404 if (rdev->data_offset != rdev->new_data_offset)
3405 return -EINVAL; /* too confusing */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003406 if (my_mddev->pers && rdev->raid_disk >= 0) {
Neil Brownd7027452008-07-12 10:37:50 +10003407 if (my_mddev->persistent) {
Andre Nolldd8ac332009-03-31 14:33:13 +11003408 sectors = super_types[my_mddev->major_version].
3409 rdev_size_change(rdev, sectors);
3410 if (!sectors)
Chris Webb0cd17fe2008-06-28 08:31:46 +10003411 return -EBUSY;
Andre Nolldd8ac332009-03-31 14:33:13 +11003412 } else if (!sectors)
Mike Snitzer77304d22010-11-08 14:39:12 +01003413 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
Andre Nolldd8ac332009-03-31 14:33:13 +11003414 rdev->data_offset;
NeilBrowna6468532013-02-21 14:33:17 +11003415 if (!my_mddev->pers->resize)
3416 /* Cannot change size for RAID0 or Linear etc */
3417 return -EINVAL;
Chris Webb0cd17fe2008-06-28 08:31:46 +10003418 }
Andre Nolldd8ac332009-03-31 14:33:13 +11003419 if (sectors < my_mddev->dev_sectors)
Chris Webb7d3c6f82008-10-13 11:55:11 +11003420 return -EINVAL; /* component must fit device */
Chris Webb0cd17fe2008-06-28 08:31:46 +10003421
Andre Nolldd8ac332009-03-31 14:33:13 +11003422 rdev->sectors = sectors;
3423 if (sectors > oldsectors && my_mddev->external) {
NeilBrown8b1afc32014-09-29 15:33:20 +10003424 /* Need to check that all other rdevs with the same
3425 * ->bdev do not overlap. 'rcu' is sufficient to walk
3426 * the rdev lists safely.
3427 * This check does not provide a hard guarantee, it
3428 * just helps avoid dangerous mistakes.
NeilBrownc5d79ad2008-02-06 01:39:54 -08003429 */
NeilBrownfd01b882011-10-11 16:47:53 +11003430 struct mddev *mddev;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003431 int overlap = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11003432 struct list_head *tmp;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003433
NeilBrown8b1afc32014-09-29 15:33:20 +10003434 rcu_read_lock();
NeilBrown29ac4aa2008-02-06 01:39:58 -08003435 for_each_mddev(mddev, tmp) {
NeilBrown3cb03002011-10-11 16:45:26 +11003436 struct md_rdev *rdev2;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003437
NeilBrowndafb20f2012-03-19 12:46:39 +11003438 rdev_for_each(rdev2, mddev)
NeilBrownf21e9ff2011-01-31 12:10:09 +11003439 if (rdev->bdev == rdev2->bdev &&
3440 rdev != rdev2 &&
3441 overlaps(rdev->data_offset, rdev->sectors,
3442 rdev2->data_offset,
3443 rdev2->sectors)) {
NeilBrownc5d79ad2008-02-06 01:39:54 -08003444 overlap = 1;
3445 break;
3446 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08003447 if (overlap) {
3448 mddev_put(mddev);
3449 break;
3450 }
3451 }
NeilBrown8b1afc32014-09-29 15:33:20 +10003452 rcu_read_unlock();
NeilBrownc5d79ad2008-02-06 01:39:54 -08003453 if (overlap) {
3454 /* Someone else could have slipped in a size
3455 * change here, but doing so is just silly.
Andre Nolldd8ac332009-03-31 14:33:13 +11003456 * We put oldsectors back because we *know* it is
NeilBrownc5d79ad2008-02-06 01:39:54 -08003457 * safe, and trust userspace not to race with
3458 * itself
3459 */
Andre Nolldd8ac332009-03-31 14:33:13 +11003460 rdev->sectors = oldsectors;
NeilBrownc5d79ad2008-02-06 01:39:54 -08003461 return -EBUSY;
3462 }
3463 }
NeilBrown83303b62006-01-06 00:21:06 -08003464 return len;
3465}
3466
3467static struct rdev_sysfs_entry rdev_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07003468__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
NeilBrown83303b62006-01-06 00:21:06 -08003469
NeilBrown3cb03002011-10-11 16:45:26 +11003470static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
Dan Williams06e3c812009-12-12 21:17:12 -07003471{
3472 unsigned long long recovery_start = rdev->recovery_offset;
3473
3474 if (test_bit(In_sync, &rdev->flags) ||
3475 recovery_start == MaxSector)
3476 return sprintf(page, "none\n");
3477
3478 return sprintf(page, "%llu\n", recovery_start);
3479}
3480
NeilBrown3cb03002011-10-11 16:45:26 +11003481static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
Dan Williams06e3c812009-12-12 21:17:12 -07003482{
3483 unsigned long long recovery_start;
3484
3485 if (cmd_match(buf, "none"))
3486 recovery_start = MaxSector;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003487 else if (kstrtoull(buf, 10, &recovery_start))
Dan Williams06e3c812009-12-12 21:17:12 -07003488 return -EINVAL;
3489
3490 if (rdev->mddev->pers &&
3491 rdev->raid_disk >= 0)
3492 return -EBUSY;
3493
3494 rdev->recovery_offset = recovery_start;
3495 if (recovery_start == MaxSector)
3496 set_bit(In_sync, &rdev->flags);
3497 else
3498 clear_bit(In_sync, &rdev->flags);
3499 return len;
3500}
3501
3502static struct rdev_sysfs_entry rdev_recovery_start =
3503__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3504
Vishal Vermafc974ee2015-12-24 19:20:34 -07003505/* sysfs access to bad-blocks list.
3506 * We present two files.
3507 * 'bad-blocks' lists sector numbers and lengths of ranges that
3508 * are recorded as bad. The list is truncated to fit within
3509 * the one-page limit of sysfs.
 3510 * Writing "sector length" to this file adds an acknowledged
 3511 * bad block to the list.
3512 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3513 * been acknowledged. Writing to this file adds bad blocks
3514 * without acknowledging them. This is largely for testing.
3515 */
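/*
 * For instance (hypothetical sector numbers), writing "4096 8" to
 * 'bad_blocks' records an acknowledged 8-sector bad range starting at sector
 * 4096, while the same write to 'unacknowledged_bad_blocks' records the
 * range without the acknowledged flag.
 */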
NeilBrown3cb03002011-10-11 16:45:26 +11003516static ssize_t bb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003517{
3518 return badblocks_show(&rdev->badblocks, page, 0);
3519}
NeilBrown3cb03002011-10-11 16:45:26 +11003520static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003521{
NeilBrownde393cd2011-07-28 11:31:48 +10003522 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3523 /* Maybe that ack was all we needed */
3524 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3525 wake_up(&rdev->blocked_wait);
3526 return rv;
NeilBrown16c791a2011-07-28 11:31:47 +10003527}
3528static struct rdev_sysfs_entry rdev_bad_blocks =
3529__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3530
NeilBrown3cb03002011-10-11 16:45:26 +11003531static ssize_t ubb_show(struct md_rdev *rdev, char *page)
NeilBrown16c791a2011-07-28 11:31:47 +10003532{
3533 return badblocks_show(&rdev->badblocks, page, 1);
3534}
NeilBrown3cb03002011-10-11 16:45:26 +11003535static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
NeilBrown16c791a2011-07-28 11:31:47 +10003536{
3537 return badblocks_store(&rdev->badblocks, page, len, 1);
3538}
3539static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3540__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3541
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003542static ssize_t
3543ppl_sector_show(struct md_rdev *rdev, char *page)
3544{
3545 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3546}
3547
3548static ssize_t
3549ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3550{
3551 unsigned long long sector;
3552
3553 if (kstrtoull(buf, 10, &sector) < 0)
3554 return -EINVAL;
3555 if (sector != (sector_t)sector)
3556 return -EINVAL;
3557
3558 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3559 rdev->raid_disk >= 0)
3560 return -EBUSY;
3561
3562 if (rdev->mddev->persistent) {
3563 if (rdev->mddev->major_version == 0)
3564 return -EINVAL;
3565 if ((sector > rdev->sb_start &&
3566 sector - rdev->sb_start > S16_MAX) ||
3567 (sector < rdev->sb_start &&
3568 rdev->sb_start - sector > -S16_MIN))
3569 return -EINVAL;
3570 rdev->ppl.offset = sector - rdev->sb_start;
3571 } else if (!rdev->mddev->external) {
3572 return -EBUSY;
3573 }
3574 rdev->ppl.sector = sector;
3575 return len;
3576}
3577
3578static struct rdev_sysfs_entry rdev_ppl_sector =
3579__ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
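/*
 * Rough illustration (hypothetical layout): the store above only accepts PPL
 * sectors within 32768 sectors below or 32767 sectors above rdev->sb_start,
 * because ppl.offset is kept as a signed 16-bit distance from the superblock
 * in the v1.x metadata.
 */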
3580
3581static ssize_t
3582ppl_size_show(struct md_rdev *rdev, char *page)
3583{
3584 return sprintf(page, "%u\n", rdev->ppl.size);
3585}
3586
3587static ssize_t
3588ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3589{
3590 unsigned int size;
3591
3592 if (kstrtouint(buf, 10, &size) < 0)
3593 return -EINVAL;
3594
3595 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3596 rdev->raid_disk >= 0)
3597 return -EBUSY;
3598
3599 if (rdev->mddev->persistent) {
3600 if (rdev->mddev->major_version == 0)
3601 return -EINVAL;
3602 if (size > U16_MAX)
3603 return -EINVAL;
3604 } else if (!rdev->mddev->external) {
3605 return -EBUSY;
3606 }
3607 rdev->ppl.size = size;
3608 return len;
3609}
3610
3611static struct rdev_sysfs_entry rdev_ppl_size =
3612__ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3613
NeilBrown86e6ffd2005-11-08 21:39:24 -08003614static struct attribute *rdev_default_attrs[] = {
3615 &rdev_state.attr,
NeilBrown4dbcdc72006-01-06 00:20:52 -08003616 &rdev_errors.attr,
NeilBrown014236d2006-01-06 00:20:55 -08003617 &rdev_slot.attr,
NeilBrown93c8cad2006-01-06 00:20:56 -08003618 &rdev_offset.attr,
NeilBrownc6563a82012-05-21 09:27:00 +10003619 &rdev_new_offset.attr,
NeilBrown83303b62006-01-06 00:21:06 -08003620 &rdev_size.attr,
Dan Williams06e3c812009-12-12 21:17:12 -07003621 &rdev_recovery_start.attr,
NeilBrown16c791a2011-07-28 11:31:47 +10003622 &rdev_bad_blocks.attr,
3623 &rdev_unack_bad_blocks.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01003624 &rdev_ppl_sector.attr,
3625 &rdev_ppl_size.attr,
NeilBrown86e6ffd2005-11-08 21:39:24 -08003626 NULL,
3627};
3628static ssize_t
3629rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3630{
3631 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003632 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003633
3634 if (!entry->show)
3635 return -EIO;
NeilBrown758bfc82014-12-15 12:56:59 +11003636 if (!rdev->mddev)
Marcos Paulo de Souza168b3052019-06-14 15:41:06 -07003637 return -ENODEV;
NeilBrown758bfc82014-12-15 12:56:59 +11003638 return entry->show(rdev, page);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003639}
3640
3641static ssize_t
3642rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3643 const char *page, size_t length)
3644{
3645 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
NeilBrown3cb03002011-10-11 16:45:26 +11003646 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
NeilBrown27c529b2008-03-04 14:29:33 -08003647 ssize_t rv;
NeilBrownfd01b882011-10-11 16:47:53 +11003648 struct mddev *mddev = rdev->mddev;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003649
3650 if (!entry->store)
3651 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07003652 if (!capable(CAP_SYS_ADMIN))
3653 return -EACCES;
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003654 rv = mddev ? mddev_lock(mddev) : -ENODEV;
NeilBrownca388052008-02-06 01:39:55 -08003655 if (!rv) {
NeilBrown27c529b2008-03-04 14:29:33 -08003656 if (rdev->mddev == NULL)
Pawel Baldysiakc42d3242019-03-27 13:48:21 +01003657 rv = -ENODEV;
NeilBrown27c529b2008-03-04 14:29:33 -08003658 else
3659 rv = entry->store(rdev, page, length);
Dan Williams6a518302008-04-30 00:52:28 -07003660 mddev_unlock(mddev);
NeilBrownca388052008-02-06 01:39:55 -08003661 }
3662 return rv;
NeilBrown86e6ffd2005-11-08 21:39:24 -08003663}
3664
3665static void rdev_free(struct kobject *ko)
3666{
NeilBrown3cb03002011-10-11 16:45:26 +11003667 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003668 kfree(rdev);
3669}
Emese Revfy52cf25d2010-01-19 02:58:23 +01003670static const struct sysfs_ops rdev_sysfs_ops = {
NeilBrown86e6ffd2005-11-08 21:39:24 -08003671 .show = rdev_attr_show,
3672 .store = rdev_attr_store,
3673};
3674static struct kobj_type rdev_ktype = {
3675 .release = rdev_free,
3676 .sysfs_ops = &rdev_sysfs_ops,
3677 .default_attrs = rdev_default_attrs,
3678};
3679
NeilBrown3cb03002011-10-11 16:45:26 +11003680int md_rdev_init(struct md_rdev *rdev)
NeilBrowne8bb9a82010-06-01 19:37:26 +10003681{
3682 rdev->desc_nr = -1;
3683 rdev->saved_raid_disk = -1;
3684 rdev->raid_disk = -1;
3685 rdev->flags = 0;
3686 rdev->data_offset = 0;
NeilBrownc6563a82012-05-21 09:27:00 +10003687 rdev->new_data_offset = 0;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003688 rdev->sb_events = 0;
Arnd Bergmann0e3ef492016-06-17 17:33:10 +02003689 rdev->last_read_error = 0;
NeilBrown2699b672011-07-28 11:31:47 +10003690 rdev->sb_loaded = 0;
3691 rdev->bb_page = NULL;
NeilBrowne8bb9a82010-06-01 19:37:26 +10003692 atomic_set(&rdev->nr_pending, 0);
3693 atomic_set(&rdev->read_errors, 0);
3694 atomic_set(&rdev->corrected_errors, 0);
3695
3696 INIT_LIST_HEAD(&rdev->same_set);
3697 init_waitqueue_head(&rdev->blocked_wait);
NeilBrown2230dfe2011-07-28 11:31:46 +10003698
3699 /* Add space to store bad block list.
3700 * This reserves the space even on arrays where it cannot
3701 * be used - I wonder if that matters
3702 */
Vishal Vermafc974ee2015-12-24 19:20:34 -07003703 return badblocks_init(&rdev->badblocks, 0);
NeilBrowne8bb9a82010-06-01 19:37:26 +10003704}
3705EXPORT_SYMBOL_GPL(md_rdev_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706/*
3707 * Import a device. If 'super_format' >= 0, then sanity check the superblock
3708 *
3709 * mark the device faulty if:
3710 *
3711 * - the device is nonexistent (zero size)
3712 * - the device has no valid superblock
3713 *
3714 * a faulty rdev _never_ has rdev->sb set.
3715 */
NeilBrown3cb03002011-10-11 16:45:26 +11003716static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003717{
3718 char b[BDEVNAME_SIZE];
3719 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11003720 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721 sector_t size;
3722
NeilBrown9ffae0c2006-01-06 00:20:32 -08003723 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
NeilBrown9d487392016-11-02 14:16:49 +11003724 if (!rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 return ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726
NeilBrown2230dfe2011-07-28 11:31:46 +10003727 err = md_rdev_init(rdev);
3728 if (err)
3729 goto abort_free;
3730 err = alloc_disk_sb(rdev);
3731 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732 goto abort_free;
3733
NeilBrownc5d79ad2008-02-06 01:39:54 -08003734 err = lock_rdev(rdev, newdev, super_format == -2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 if (err)
3736 goto abort_free;
3737
Greg Kroah-Hartmanf9cb0742007-12-17 23:05:35 -07003738 kobject_init(&rdev->kobj, &rdev_ktype);
NeilBrown86e6ffd2005-11-08 21:39:24 -08003739
Mike Snitzer77304d22010-11-08 14:39:12 +01003740 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 if (!size) {
NeilBrown9d487392016-11-02 14:16:49 +11003742 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 bdevname(rdev->bdev,b));
3744 err = -EINVAL;
3745 goto abort_free;
3746 }
3747
3748 if (super_format >= 0) {
3749 err = super_types[super_format].
3750 load_super(rdev, NULL, super_minor);
3751 if (err == -EINVAL) {
NeilBrown9d487392016-11-02 14:16:49 +11003752 pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
NeilBrowndf968c42007-07-17 04:06:11 -07003753 bdevname(rdev->bdev,b),
NeilBrown9d487392016-11-02 14:16:49 +11003754 super_format, super_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 goto abort_free;
3756 }
3757 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11003758 pr_warn("md: could not read %s's sb, not importing!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003759 bdevname(rdev->bdev,b));
3760 goto abort_free;
3761 }
3762 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07003763
Linus Torvalds1da177e2005-04-16 15:20:36 -07003764 return rdev;
3765
3766abort_free:
NeilBrown2699b672011-07-28 11:31:47 +10003767 if (rdev->bdev)
3768 unlock_rdev(rdev);
NeilBrown545c8792012-05-22 13:54:30 +10003769 md_rdev_clear(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 kfree(rdev);
3771 return ERR_PTR(err);
3772}
3773
3774/*
3775 * Check a full RAID array for plausibility
3776 */
3777
Yufen Yu6a5cb532019-10-16 16:00:03 +08003778static int analyze_sbs(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003779{
3780 int i;
NeilBrown3cb03002011-10-11 16:45:26 +11003781 struct md_rdev *rdev, *freshest, *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782 char b[BDEVNAME_SIZE];
3783
3784 freshest = NULL;
NeilBrowndafb20f2012-03-19 12:46:39 +11003785 rdev_for_each_safe(rdev, tmp, mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786 switch (super_types[mddev->major_version].
3787 load_super(rdev, freshest, mddev->minor_version)) {
3788 case 1:
3789 freshest = rdev;
3790 break;
3791 case 0:
3792 break;
3793 default:
NeilBrown9d487392016-11-02 14:16:49 +11003794 pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003796 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797 }
3798
Yufen Yu6a5cb532019-10-16 16:00:03 +08003799 /* Cannot find a valid fresh disk */
3800 if (!freshest) {
3801 pr_warn("md: cannot find a valid disk\n");
3802 return -EINVAL;
3803 }
3804
Linus Torvalds1da177e2005-04-16 15:20:36 -07003805 super_types[mddev->major_version].
3806 validate_super(mddev, freshest);
3807
3808 i = 0;
NeilBrowndafb20f2012-03-19 12:46:39 +11003809 rdev_for_each_safe(rdev, tmp, mddev) {
NeilBrown233fca32010-04-14 17:02:09 +10003810 if (mddev->max_disks &&
3811 (rdev->desc_nr >= mddev->max_disks ||
3812 i > mddev->max_disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11003813 pr_warn("md: %s: %s: only %d devices permitted\n",
3814 mdname(mddev), bdevname(rdev->bdev, b),
3815 mddev->max_disks);
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003816 md_kick_rdev_from_array(rdev);
NeilBrownde01dfa2009-02-06 18:02:46 +11003817 continue;
3818 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003819 if (rdev != freshest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 if (super_types[mddev->major_version].
3821 validate_super(mddev, rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003822 pr_warn("md: kicking non-fresh %s from array!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823 bdevname(rdev->bdev,b));
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05003824 md_kick_rdev_from_array(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 continue;
3826 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05003827 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 if (mddev->level == LEVEL_MULTIPATH) {
3829 rdev->desc_nr = i++;
3830 rdev->raid_disk = rdev->desc_nr;
NeilBrownb2d444d2005-11-08 21:39:31 -08003831 set_bit(In_sync, &rdev->flags);
Shaohua Lif2076e72015-10-08 21:54:12 -07003832 } else if (rdev->raid_disk >=
3833 (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3834 !test_bit(Journal, &rdev->flags)) {
NeilBrowna778b732007-05-23 13:58:10 -07003835 rdev->raid_disk = -1;
3836 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003837 }
3838 }
Yufen Yu6a5cb532019-10-16 16:00:03 +08003839
3840 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003841}
3842
NeilBrown72e02072009-12-14 12:49:55 +11003843/* Read a fixed-point number.
3844 * Numbers in sysfs attributes should be in "standard" units where
3845 * possible, so time should be in seconds.
NeilBrownf72ffdd2014-09-30 14:23:59 +10003846 * However we internally use a much smaller unit such as
NeilBrown72e02072009-12-14 12:49:55 +11003847 * milliseconds or jiffies.
3848 * This function takes a decimal number with a possible fractional
3849 * component, and produces an integer which is the result of
3850 * multiplying that number by 10^'scale'.
3851 * all without any floating-point arithmetic.
3852 */
3853int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3854{
3855 unsigned long result = 0;
3856 long decimals = -1;
3857 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3858 if (*cp == '.')
3859 decimals = 0;
3860 else if (decimals < scale) {
3861 unsigned int value;
3862 value = *cp - '0';
3863 result = result * 10 + value;
3864 if (decimals >= 0)
3865 decimals++;
3866 }
3867 cp++;
3868 }
3869 if (*cp == '\n')
3870 cp++;
3871 if (*cp)
3872 return -EINVAL;
3873 if (decimals < 0)
3874 decimals = 0;
Andy Shevchenkocf891602019-07-23 23:41:55 +03003875 *res = result * int_pow(10, scale - decimals);
NeilBrown72e02072009-12-14 12:49:55 +11003876 return 0;
3877}
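/*
 * Worked example (hypothetical input): strict_strtoul_scaled("1.5", &res, 3)
 * reads one digit after the decimal point, so result == 15 and decimals == 1,
 * and *res becomes 15 * 10^(3 - 1) == 1500 -- i.e. 1.5 seconds expressed in
 * milliseconds.
 */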
3878
NeilBrowneae17012005-11-08 21:39:23 -08003879static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003880safe_delay_show(struct mddev *mddev, char *page)
NeilBrown16f17b32006-06-26 00:27:37 -07003881{
3882 int msec = (mddev->safemode_delay*1000)/HZ;
3883 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3884}
3885static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003886safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
NeilBrown16f17b32006-06-26 00:27:37 -07003887{
NeilBrown16f17b32006-06-26 00:27:37 -07003888 unsigned long msec;
Dan Williams97ce0a72008-09-24 22:48:19 -07003889
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003890 if (mddev_is_clustered(mddev)) {
NeilBrown9d487392016-11-02 14:16:49 +11003891 pr_warn("md: Safemode is disabled for clustered mode\n");
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11003892 return -EINVAL;
3893 }
3894
NeilBrown72e02072009-12-14 12:49:55 +11003895 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
NeilBrown16f17b32006-06-26 00:27:37 -07003896 return -EINVAL;
NeilBrown16f17b32006-06-26 00:27:37 -07003897 if (msec == 0)
3898 mddev->safemode_delay = 0;
3899 else {
NeilBrown19052c02008-08-05 15:54:13 +10003900 unsigned long old_delay = mddev->safemode_delay;
NeilBrown1b30e662014-12-15 12:57:00 +11003901 unsigned long new_delay = (msec*HZ)/1000;
3902
3903 if (new_delay == 0)
3904 new_delay = 1;
3905 mddev->safemode_delay = new_delay;
3906 if (new_delay < old_delay || old_delay == 0)
3907 mod_timer(&mddev->safemode_timer, jiffies+1);
NeilBrown16f17b32006-06-26 00:27:37 -07003908 }
3909 return len;
3910}
3911static struct md_sysfs_entry md_safe_delay =
NeilBrown80ca3a42006-07-10 04:44:18 -07003912__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
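/*
 * Example (hypothetical value): writing "0.2" to safe_mode_delay is parsed
 * as 200 msec by strict_strtoul_scaled() and stored as (200*HZ)/1000 jiffies;
 * a result of 0 jiffies is bumped to 1 so a small non-zero delay never
 * silently disables safemode.
 */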
NeilBrown16f17b32006-06-26 00:27:37 -07003913
3914static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003915level_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08003916{
NeilBrown36d091f2014-12-15 12:56:58 +11003917 struct md_personality *p;
3918 int ret;
3919 spin_lock(&mddev->lock);
3920 p = mddev->pers;
NeilBrownd9d166c2006-01-06 00:20:51 -08003921 if (p)
NeilBrown36d091f2014-12-15 12:56:58 +11003922 ret = sprintf(page, "%s\n", p->name);
NeilBrownd9d166c2006-01-06 00:20:51 -08003923 else if (mddev->clevel[0])
NeilBrown36d091f2014-12-15 12:56:58 +11003924 ret = sprintf(page, "%s\n", mddev->clevel);
NeilBrownd9d166c2006-01-06 00:20:51 -08003925 else if (mddev->level != LEVEL_NONE)
NeilBrown36d091f2014-12-15 12:56:58 +11003926 ret = sprintf(page, "%d\n", mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08003927 else
NeilBrown36d091f2014-12-15 12:56:58 +11003928 ret = 0;
3929 spin_unlock(&mddev->lock);
3930 return ret;
NeilBrowneae17012005-11-08 21:39:23 -08003931}
3932
NeilBrownd9d166c2006-01-06 00:20:51 -08003933static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11003934level_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd9d166c2006-01-06 00:20:51 -08003935{
Dan Williamsf2859af2010-05-02 10:04:16 -07003936 char clevel[16];
NeilBrown67918752014-12-15 12:57:01 +11003937 ssize_t rv;
3938 size_t slen = len;
NeilBrowndb721d32014-12-15 12:56:58 +11003939 struct md_personality *pers, *oldpers;
Dan Williamsf2859af2010-05-02 10:04:16 -07003940 long level;
NeilBrowndb721d32014-12-15 12:56:58 +11003941 void *priv, *oldpriv;
NeilBrown3cb03002011-10-11 16:45:26 +11003942 struct md_rdev *rdev;
NeilBrown245f46c2009-03-31 14:39:39 +11003943
NeilBrown67918752014-12-15 12:57:01 +11003944 if (slen == 0 || slen >= sizeof(clevel))
3945 return -EINVAL;
3946
3947 rv = mddev_lock(mddev);
3948 if (rv)
NeilBrown245f46c2009-03-31 14:39:39 +11003949 return rv;
NeilBrown67918752014-12-15 12:57:01 +11003950
3951 if (mddev->pers == NULL) {
3952 strncpy(mddev->clevel, buf, slen);
3953 if (mddev->clevel[slen-1] == '\n')
3954 slen--;
3955 mddev->clevel[slen] = 0;
3956 mddev->level = LEVEL_NONE;
3957 rv = len;
3958 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003959 }
NeilBrown67918752014-12-15 12:57:01 +11003960 rv = -EROFS;
NeilBrownbd8839e2014-05-28 13:39:21 +10003961 if (mddev->ro)
NeilBrown67918752014-12-15 12:57:01 +11003962 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003963
3964 /* request to change the personality. Need to ensure:
3965 * - array is not engaged in resync/recovery/reshape
3966 * - old personality can be suspended
3967 * - new personality will access other array.
3968 */
3969
NeilBrown67918752014-12-15 12:57:01 +11003970 rv = -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10003971 if (mddev->sync_thread ||
NeilBrownf851b602014-12-11 10:02:10 +11003972 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
NeilBrownbb4f1e92010-08-08 21:18:03 +10003973 mddev->reshape_position != MaxSector ||
3974 mddev->sysfs_active)
NeilBrown67918752014-12-15 12:57:01 +11003975 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003976
NeilBrown67918752014-12-15 12:57:01 +11003977 rv = -EINVAL;
NeilBrown245f46c2009-03-31 14:39:39 +11003978 if (!mddev->pers->quiesce) {
NeilBrown9d487392016-11-02 14:16:49 +11003979 pr_warn("md: %s: %s does not support online personality change\n",
3980 mdname(mddev), mddev->pers->name);
NeilBrown67918752014-12-15 12:57:01 +11003981 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11003982 }
3983
3984 /* Now find the new personality */
NeilBrown67918752014-12-15 12:57:01 +11003985 strncpy(clevel, buf, slen);
3986 if (clevel[slen-1] == '\n')
3987 slen--;
3988 clevel[slen] = 0;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09003989 if (kstrtol(clevel, 10, &level))
Dan Williamsf2859af2010-05-02 10:04:16 -07003990 level = LEVEL_NONE;
NeilBrown245f46c2009-03-31 14:39:39 +11003991
Dan Williamsf2859af2010-05-02 10:04:16 -07003992 if (request_module("md-%s", clevel) != 0)
3993 request_module("md-level-%s", clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003994 spin_lock(&pers_lock);
Dan Williamsf2859af2010-05-02 10:04:16 -07003995 pers = find_pers(level, clevel);
NeilBrown245f46c2009-03-31 14:39:39 +11003996 if (!pers || !try_module_get(pers->owner)) {
3997 spin_unlock(&pers_lock);
NeilBrown9d487392016-11-02 14:16:49 +11003998 pr_warn("md: personality %s not loaded\n", clevel);
NeilBrown67918752014-12-15 12:57:01 +11003999 rv = -EINVAL;
4000 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004001 }
4002 spin_unlock(&pers_lock);
4003
4004 if (pers == mddev->pers) {
4005 /* Nothing to do! */
4006 module_put(pers->owner);
NeilBrown67918752014-12-15 12:57:01 +11004007 rv = len;
4008 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004009 }
4010 if (!pers->takeover) {
4011 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004012 pr_warn("md: %s: %s does not support personality takeover\n",
4013 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004014 rv = -EINVAL;
4015 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004016 }
4017
NeilBrowndafb20f2012-03-19 12:46:39 +11004018 rdev_for_each(rdev, mddev)
NeilBrowne93f68a2010-06-15 09:36:03 +01004019 rdev->new_raid_disk = rdev->raid_disk;
4020
NeilBrown245f46c2009-03-31 14:39:39 +11004021 /* ->takeover must set new_* and/or delta_disks
4022 * if it succeeds, and may set them when it fails.
4023 */
4024 priv = pers->takeover(mddev);
4025 if (IS_ERR(priv)) {
4026 mddev->new_level = mddev->level;
4027 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10004028 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown245f46c2009-03-31 14:39:39 +11004029 mddev->raid_disks -= mddev->delta_disks;
4030 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10004031 mddev->reshape_backwards = 0;
NeilBrown245f46c2009-03-31 14:39:39 +11004032 module_put(pers->owner);
NeilBrown9d487392016-11-02 14:16:49 +11004033 pr_warn("md: %s: %s would not accept array\n",
4034 mdname(mddev), clevel);
NeilBrown67918752014-12-15 12:57:01 +11004035 rv = PTR_ERR(priv);
4036 goto out_unlock;
NeilBrown245f46c2009-03-31 14:39:39 +11004037 }
4038
4039 /* Looks like we have a winner */
4040 mddev_suspend(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11004041 mddev_detach(mddev);
NeilBrown36d091f2014-12-15 12:56:58 +11004042
4043 spin_lock(&mddev->lock);
NeilBrowndb721d32014-12-15 12:56:58 +11004044 oldpers = mddev->pers;
4045 oldpriv = mddev->private;
4046 mddev->pers = pers;
4047 mddev->private = priv;
4048 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
4049 mddev->level = mddev->new_level;
4050 mddev->layout = mddev->new_layout;
4051 mddev->chunk_sectors = mddev->new_chunk_sectors;
4052 mddev->delta_disks = 0;
4053 mddev->reshape_backwards = 0;
4054 mddev->degraded = 0;
NeilBrown36d091f2014-12-15 12:56:58 +11004055 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10004056
NeilBrowndb721d32014-12-15 12:56:58 +11004057 if (oldpers->sync_request == NULL &&
Trela Maciej54071b32010-03-08 16:02:42 +11004058 mddev->external) {
4059 /* We are converting from a no-redundancy array
4060 * to a redundancy array and metadata is managed
4061 * externally so we need to be sure that writes
4062 * won't block due to a need to transition
4063 * clean->dirty
4064 * until external management is started.
4065 */
4066 mddev->in_sync = 0;
4067 mddev->safemode_delay = 0;
4068 mddev->safemode = 0;
4069 }
4070
NeilBrowndb721d32014-12-15 12:56:58 +11004071 oldpers->free(mddev, oldpriv);
4072
4073 if (oldpers->sync_request == NULL &&
4074 pers->sync_request != NULL) {
4075 /* need to add the md_redundancy_group */
4076 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11004077 pr_warn("md: cannot register extra attributes for %s\n",
4078 mdname(mddev));
NeilBrowndb721d32014-12-15 12:56:58 +11004079 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07004080 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4081 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrowndb721d32014-12-15 12:56:58 +11004082 }
4083 if (oldpers->sync_request != NULL &&
4084 pers->sync_request == NULL) {
4085 /* need to remove the md_redundancy_group */
4086 if (mddev->to_remove == NULL)
4087 mddev->to_remove = &md_redundancy_group;
4088 }
4089
Alexey Obitotskiy4cb9da72016-06-23 12:11:01 +02004090 module_put(oldpers->owner);
4091
NeilBrowndafb20f2012-03-19 12:46:39 +11004092 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004093 if (rdev->raid_disk < 0)
4094 continue;
NeilBrownbf2cb0d2011-01-14 09:14:34 +11004095 if (rdev->new_raid_disk >= mddev->raid_disks)
NeilBrowne93f68a2010-06-15 09:36:03 +01004096 rdev->new_raid_disk = -1;
4097 if (rdev->new_raid_disk == rdev->raid_disk)
4098 continue;
Namhyung Kim36fad852011-07-27 11:00:36 +10004099 sysfs_unlink_rdev(mddev, rdev);
NeilBrowne93f68a2010-06-15 09:36:03 +01004100 }
NeilBrowndafb20f2012-03-19 12:46:39 +11004101 rdev_for_each(rdev, mddev) {
NeilBrowne93f68a2010-06-15 09:36:03 +01004102 if (rdev->raid_disk < 0)
4103 continue;
4104 if (rdev->new_raid_disk == rdev->raid_disk)
4105 continue;
4106 rdev->raid_disk = rdev->new_raid_disk;
4107 if (rdev->raid_disk < 0)
NeilBrown3a981b02009-08-03 10:59:55 +10004108 clear_bit(In_sync, &rdev->flags);
NeilBrowne93f68a2010-06-15 09:36:03 +01004109 else {
Namhyung Kim36fad852011-07-27 11:00:36 +10004110 if (sysfs_link_rdev(mddev, rdev))
NeilBrown9d487392016-11-02 14:16:49 +11004111 pr_warn("md: cannot register rd%d for %s after level change\n",
4112 rdev->raid_disk, mdname(mddev));
NeilBrown3a981b02009-08-03 10:59:55 +10004113 }
NeilBrowne93f68a2010-06-15 09:36:03 +01004114 }
4115
NeilBrowndb721d32014-12-15 12:56:58 +11004116 if (pers->sync_request == NULL) {
Trela, Maciej9af204c2010-03-08 16:02:44 +11004117 /* this is now an array without redundancy, so
4118 * it must always be in_sync
4119 */
4120 mddev->in_sync = 1;
4121 del_timer_sync(&mddev->safemode_timer);
4122 }
NeilBrown02e5f5c2013-11-14 15:16:15 +11004123 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown245f46c2009-03-31 14:39:39 +11004124 pers->run(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004125 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Jonathan Brassow47525e52012-05-22 13:55:29 +10004126 mddev_resume(mddev);
NeilBrown830778a2014-01-14 15:17:03 +11004127 if (!mddev->thread)
4128 md_update_sb(mddev, 1);
Junxiao Bie1a86db2020-07-14 16:10:26 -07004129 sysfs_notify_dirent_safe(mddev->sysfs_level);
Dan Williamsbb7f8d22010-05-01 18:14:57 -07004130 md_new_event(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004131 rv = len;
4132out_unlock:
4133 mddev_unlock(mddev);
NeilBrownd9d166c2006-01-06 00:20:51 -08004134 return rv;
4135}
4136
4137static struct md_sysfs_entry md_level =
NeilBrown80ca3a42006-07-10 04:44:18 -07004138__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
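/*
 * Usage sketch (hypothetical transition): writing "raid5" to 'level' on a
 * running array loads the md-raid5 module if needed, asks the new
 * personality's ->takeover() for a private context, and only then swaps
 * personalities under mddev->lock with the array suspended.
 */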
NeilBrowneae17012005-11-08 21:39:23 -08004139
NeilBrownd4dbd022006-06-26 00:27:59 -07004140static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004141layout_show(struct mddev *mddev, char *page)
NeilBrownd4dbd022006-06-26 00:27:59 -07004142{
4143 /* just a number, not meaningful for all levels */
NeilBrown08a02ec2007-05-09 02:35:38 -07004144 if (mddev->reshape_position != MaxSector &&
4145 mddev->layout != mddev->new_layout)
4146 return sprintf(page, "%d (%d)\n",
4147 mddev->new_layout, mddev->layout);
NeilBrownd4dbd022006-06-26 00:27:59 -07004148 return sprintf(page, "%d\n", mddev->layout);
4149}
4150
4151static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004152layout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownd4dbd022006-06-26 00:27:59 -07004153{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004154 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004155 int err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004156
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004157 err = kstrtouint(buf, 10, &n);
4158 if (err < 0)
4159 return err;
NeilBrown67918752014-12-15 12:57:01 +11004160 err = mddev_lock(mddev);
4161 if (err)
4162 return err;
NeilBrownd4dbd022006-06-26 00:27:59 -07004163
NeilBrownb3546032009-03-31 14:56:41 +11004164 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004165 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004166 err = -EBUSY;
4167 else if (mddev->ro)
4168 err = -EROFS;
4169 else {
4170 mddev->new_layout = n;
4171 err = mddev->pers->check_reshape(mddev);
4172 if (err)
4173 mddev->new_layout = mddev->layout;
NeilBrown597a7112009-06-18 08:47:42 +10004174 }
NeilBrownb3546032009-03-31 14:56:41 +11004175 } else {
NeilBrown08a02ec2007-05-09 02:35:38 -07004176 mddev->new_layout = n;
NeilBrownb3546032009-03-31 14:56:41 +11004177 if (mddev->reshape_position == MaxSector)
4178 mddev->layout = n;
4179 }
NeilBrown67918752014-12-15 12:57:01 +11004180 mddev_unlock(mddev);
4181 return err ?: len;
NeilBrownd4dbd022006-06-26 00:27:59 -07004182}
4183static struct md_sysfs_entry md_layout =
NeilBrown80ca3a42006-07-10 04:44:18 -07004184__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
NeilBrownd4dbd022006-06-26 00:27:59 -07004185
NeilBrowneae17012005-11-08 21:39:23 -08004186static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004187raid_disks_show(struct mddev *mddev, char *page)
NeilBrowneae17012005-11-08 21:39:23 -08004188{
NeilBrownbb636542005-11-08 21:39:45 -08004189 if (mddev->raid_disks == 0)
4190 return 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004191 if (mddev->reshape_position != MaxSector &&
4192 mddev->delta_disks != 0)
4193 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4194 mddev->raid_disks - mddev->delta_disks);
NeilBrowneae17012005-11-08 21:39:23 -08004195 return sprintf(page, "%d\n", mddev->raid_disks);
4196}
4197
NeilBrownfd01b882011-10-11 16:47:53 +11004198static int update_raid_disks(struct mddev *mddev, int raid_disks);
NeilBrownda943b992006-01-06 00:20:54 -08004199
4200static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004201raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownda943b992006-01-06 00:20:54 -08004202{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004203 unsigned int n;
NeilBrown67918752014-12-15 12:57:01 +11004204 int err;
NeilBrownda943b992006-01-06 00:20:54 -08004205
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004206 err = kstrtouint(buf, 10, &n);
4207 if (err < 0)
4208 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004209
NeilBrown67918752014-12-15 12:57:01 +11004210 err = mddev_lock(mddev);
4211 if (err)
4212 return err;
NeilBrownda943b992006-01-06 00:20:54 -08004213 if (mddev->pers)
NeilBrown67918752014-12-15 12:57:01 +11004214 err = update_raid_disks(mddev, n);
NeilBrown08a02ec2007-05-09 02:35:38 -07004215 else if (mddev->reshape_position != MaxSector) {
NeilBrownc6563a82012-05-21 09:27:00 +10004216 struct md_rdev *rdev;
NeilBrown08a02ec2007-05-09 02:35:38 -07004217 int olddisks = mddev->raid_disks - mddev->delta_disks;
NeilBrownc6563a82012-05-21 09:27:00 +10004218
NeilBrown67918752014-12-15 12:57:01 +11004219 err = -EINVAL;
NeilBrownc6563a82012-05-21 09:27:00 +10004220 rdev_for_each(rdev, mddev) {
4221 if (olddisks < n &&
4222 rdev->data_offset < rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004223 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004224 if (olddisks > n &&
4225 rdev->data_offset > rdev->new_data_offset)
NeilBrown67918752014-12-15 12:57:01 +11004226 goto out_unlock;
NeilBrownc6563a82012-05-21 09:27:00 +10004227 }
NeilBrown67918752014-12-15 12:57:01 +11004228 err = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07004229 mddev->delta_disks = n - olddisks;
4230 mddev->raid_disks = n;
NeilBrown2c810cd2012-05-21 09:27:00 +10004231 mddev->reshape_backwards = (mddev->delta_disks < 0);
NeilBrown08a02ec2007-05-09 02:35:38 -07004232 } else
NeilBrownda943b992006-01-06 00:20:54 -08004233 mddev->raid_disks = n;
NeilBrown67918752014-12-15 12:57:01 +11004234out_unlock:
4235 mddev_unlock(mddev);
4236 return err ? err : len;
NeilBrownda943b992006-01-06 00:20:54 -08004237}
4238static struct md_sysfs_entry md_raid_disks =
NeilBrown80ca3a42006-07-10 04:44:18 -07004239__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
NeilBrowneae17012005-11-08 21:39:23 -08004240
NeilBrown24dd4692005-11-08 21:39:26 -08004241static ssize_t
Sebastian Parschauerec164d072020-07-28 12:01:39 +02004242uuid_show(struct mddev *mddev, char *page)
4243{
4244 return sprintf(page, "%pU\n", mddev->uuid);
4245}
4246static struct md_sysfs_entry md_uuid =
4247__ATTR(uuid, S_IRUGO, uuid_show, NULL);
4248
4249static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004250chunk_size_show(struct mddev *mddev, char *page)
NeilBrown3b343802006-01-06 00:20:47 -08004251{
NeilBrown08a02ec2007-05-09 02:35:38 -07004252 if (mddev->reshape_position != MaxSector &&
Andre Noll664e7c42009-06-18 08:45:27 +10004253 mddev->chunk_sectors != mddev->new_chunk_sectors)
4254 return sprintf(page, "%d (%d)\n",
4255 mddev->new_chunk_sectors << 9,
Andre Noll9d8f0362009-06-18 08:45:01 +10004256 mddev->chunk_sectors << 9);
4257 return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
NeilBrown3b343802006-01-06 00:20:47 -08004258}
4259
4260static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004261chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown3b343802006-01-06 00:20:47 -08004262{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004263 unsigned long n;
NeilBrown67918752014-12-15 12:57:01 +11004264 int err;
NeilBrown3b343802006-01-06 00:20:47 -08004265
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004266 err = kstrtoul(buf, 10, &n);
4267 if (err < 0)
4268 return err;
NeilBrown3b343802006-01-06 00:20:47 -08004269
NeilBrown67918752014-12-15 12:57:01 +11004270 err = mddev_lock(mddev);
4271 if (err)
4272 return err;
NeilBrownb3546032009-03-31 14:56:41 +11004273 if (mddev->pers) {
NeilBrown50ac1682009-06-18 08:47:55 +10004274 if (mddev->pers->check_reshape == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004275 err = -EBUSY;
4276 else if (mddev->ro)
4277 err = -EROFS;
4278 else {
4279 mddev->new_chunk_sectors = n >> 9;
4280 err = mddev->pers->check_reshape(mddev);
4281 if (err)
4282 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrown597a7112009-06-18 08:47:42 +10004283 }
NeilBrownb3546032009-03-31 14:56:41 +11004284 } else {
Andre Noll664e7c42009-06-18 08:45:27 +10004285 mddev->new_chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004286 if (mddev->reshape_position == MaxSector)
Andre Noll9d8f0362009-06-18 08:45:01 +10004287 mddev->chunk_sectors = n >> 9;
NeilBrownb3546032009-03-31 14:56:41 +11004288 }
NeilBrown67918752014-12-15 12:57:01 +11004289 mddev_unlock(mddev);
4290 return err ?: len;
NeilBrown3b343802006-01-06 00:20:47 -08004291}
4292static struct md_sysfs_entry md_chunk_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004293__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
NeilBrown3b343802006-01-06 00:20:47 -08004294
NeilBrowna94213b2006-06-26 00:28:00 -07004295static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004296resync_start_show(struct mddev *mddev, char *page)
NeilBrowna94213b2006-06-26 00:28:00 -07004297{
NeilBrownd1a7c502009-03-31 15:24:32 +11004298 if (mddev->recovery_cp == MaxSector)
4299 return sprintf(page, "none\n");
NeilBrowna94213b2006-06-26 00:28:00 -07004300 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4301}
4302
4303static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004304resync_start_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna94213b2006-06-26 00:28:00 -07004305{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004306 unsigned long long n;
NeilBrown67918752014-12-15 12:57:01 +11004307 int err;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004308
4309 if (cmd_match(buf, "none"))
4310 n = MaxSector;
4311 else {
4312 err = kstrtoull(buf, 10, &n);
4313 if (err < 0)
4314 return err;
4315 if (n != (sector_t)n)
4316 return -EINVAL;
4317 }
NeilBrowna94213b2006-06-26 00:28:00 -07004318
NeilBrown67918752014-12-15 12:57:01 +11004319 err = mddev_lock(mddev);
4320 if (err)
4321 return err;
NeilBrownb0986362011-05-11 15:52:21 +10004322 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
NeilBrown67918752014-12-15 12:57:01 +11004323 err = -EBUSY;
NeilBrowna94213b2006-06-26 00:28:00 -07004324
NeilBrown67918752014-12-15 12:57:01 +11004325 if (!err) {
4326 mddev->recovery_cp = n;
4327 if (mddev->pers)
Shaohua Li29530792016-12-08 15:48:19 -08004328 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
NeilBrown67918752014-12-15 12:57:01 +11004329 }
4330 mddev_unlock(mddev);
4331 return err ?: len;
NeilBrowna94213b2006-06-26 00:28:00 -07004332}
4333static struct md_sysfs_entry md_resync_start =
NeilBrown750f1992014-09-30 08:53:05 +10004334__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4335 resync_start_show, resync_start_store);
NeilBrowna94213b2006-06-26 00:28:00 -07004336
NeilBrown9e653b62006-06-26 00:27:58 -07004337/*
4338 * The array state can be:
4339 *
4340 * clear
4341 * No devices, no size, no level
4342 * Equivalent to STOP_ARRAY ioctl
4343 * inactive
4344 * May have some settings, but array is not active
4345 * all IO results in error
4346 * When written, doesn't tear down array, but just stops it
4347 * suspended (not supported yet)
4348 * All IO requests will block. The array can be reconfigured.
Andre Noll910d8cb2008-03-25 21:00:53 +01004349 * Writing this, if accepted, will block until array is quiescent
NeilBrown9e653b62006-06-26 00:27:58 -07004350 * readonly
4351 * no resync can happen. no superblocks get written.
4352 * write requests fail
4353 * read-auto
4354 * like readonly, but behaves like 'clean' on a write request.
4355 *
4356 * clean - no pending writes, but otherwise active.
4357 * When written to inactive array, starts without resync
4358 * If a write request arrives then
4359 * if metadata is known, mark 'dirty' and switch to 'active'.
4360 * if not known, block and switch to write-pending
4361 * If written to an active array that has pending writes, then fails.
4362 * active
4363 * fully active: IO and resync can be happening.
4364 * When written to inactive array, starts with resync
4365 *
4366 * write-pending
4367 * clean, but writes are blocked waiting for 'active' to be written.
4368 *
4369 * active-idle
4370 * like active, but no writes have been seen for a while (100msec).
4371 *
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004372 * broken
4373 * RAID0/LINEAR-only: same as clean, but array is missing a member.
4374 * It's useful because RAID0/LINEAR mounted-arrays aren't stopped
4375 * when a member is gone, so this state will at least alert the
4376 * user that something is wrong.
NeilBrown9e653b62006-06-26 00:27:58 -07004377 */
4378enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004379 write_pending, active_idle, broken, bad_word};
Adrian Bunk05381952006-06-26 00:28:01 -07004380static char *array_states[] = {
NeilBrown9e653b62006-06-26 00:27:58 -07004381 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004382 "write-pending", "active-idle", "broken", NULL };
NeilBrown9e653b62006-06-26 00:27:58 -07004383
4384static int match_word(const char *word, char **list)
4385{
4386 int n;
4387 for (n=0; list[n]; n++)
4388 if (cmd_match(word, list[n]))
4389 break;
4390 return n;
4391}
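/*
 * match_word() returns the index of the first matching state, or the index
 * of the terminating NULL entry (== bad_word) when nothing matches; for
 * example, match_word("read-auto", array_states) evaluates to read_auto.
 */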
4392
4393static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004394array_state_show(struct mddev *mddev, char *page)
NeilBrown9e653b62006-06-26 00:27:58 -07004395{
4396 enum array_state st = inactive;
4397
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004398 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
NeilBrown9e653b62006-06-26 00:27:58 -07004399 switch(mddev->ro) {
4400 case 1:
4401 st = readonly;
4402 break;
4403 case 2:
4404 st = read_auto;
4405 break;
4406 case 0:
NeilBrown55cc39f2017-03-15 14:05:14 +11004407 spin_lock(&mddev->lock);
Shaohua Li29530792016-12-08 15:48:19 -08004408 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrowne6910632008-02-06 01:39:51 -08004409 st = write_pending;
Tomasz Majchrzak16f88942016-10-24 12:47:28 +02004410 else if (mddev->in_sync)
4411 st = clean;
NeilBrown9e653b62006-06-26 00:27:58 -07004412 else if (mddev->safemode)
4413 st = active_idle;
4414 else
4415 st = active;
NeilBrown55cc39f2017-03-15 14:05:14 +11004416 spin_unlock(&mddev->lock);
NeilBrown9e653b62006-06-26 00:27:58 -07004417 }
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004418
4419 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4420 st = broken;
4421 } else {
NeilBrown9e653b62006-06-26 00:27:58 -07004422 if (list_empty(&mddev->disks) &&
4423 mddev->raid_disks == 0 &&
Andre Noll58c0fed2009-03-31 14:33:13 +11004424 mddev->dev_sectors == 0)
NeilBrown9e653b62006-06-26 00:27:58 -07004425 st = clear;
4426 else
4427 st = inactive;
4428 }
4429 return sprintf(page, "%s\n", array_states[st]);
4430}
4431
NeilBrownf72ffdd2014-09-30 14:23:59 +10004432static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4433static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
NeilBrownfd01b882011-10-11 16:47:53 +11004434static int restart_array(struct mddev *mddev);
NeilBrown9e653b62006-06-26 00:27:58 -07004435
4436static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004437array_state_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown9e653b62006-06-26 00:27:58 -07004438{
NeilBrown6497709b2017-03-15 14:05:14 +11004439 int err = 0;
NeilBrown9e653b62006-06-26 00:27:58 -07004440 enum array_state st = match_word(buf, array_states);
NeilBrown67918752014-12-15 12:57:01 +11004441
4442 if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
4443 /* don't take reconfig_mutex when toggling between
4444 * clean and active
4445 */
4446 spin_lock(&mddev->lock);
4447 if (st == active) {
4448 restart_array(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08004449 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
Tomasz Majchrzak91a6c4a2016-10-25 17:07:08 +02004450 md_wakeup_thread(mddev->thread);
NeilBrown67918752014-12-15 12:57:01 +11004451 wake_up(&mddev->sb_wait);
NeilBrown67918752014-12-15 12:57:01 +11004452 } else /* st == clean */ {
4453 restart_array(mddev);
NeilBrown6497709b2017-03-15 14:05:14 +11004454 if (!set_in_sync(mddev))
NeilBrown67918752014-12-15 12:57:01 +11004455 err = -EBUSY;
4456 }
Tomasz Majchrzak573275b2016-06-30 10:47:09 +02004457 if (!err)
4458 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown67918752014-12-15 12:57:01 +11004459 spin_unlock(&mddev->lock);
NeilBrownc008f1d2015-06-12 19:46:44 +10004460 return err ?: len;
NeilBrown67918752014-12-15 12:57:01 +11004461 }
4462 err = mddev_lock(mddev);
4463 if (err)
4464 return err;
4465 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004466 switch(st) {
4467 case bad_word:
4468 break;
4469 case clear:
4470 /* stopping an active array */
NeilBrowna05b7ea2012-07-19 15:59:18 +10004471 err = do_md_stop(mddev, 0, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004472 break;
4473 case inactive:
4474 /* stopping an active array */
NeilBrown90cf1952012-07-31 10:04:55 +10004475 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004476 err = do_md_stop(mddev, 2, NULL);
NeilBrown90cf1952012-07-31 10:04:55 +10004477 else
NeilBrowne6910632008-02-06 01:39:51 -08004478 err = 0; /* already inactive */
NeilBrown9e653b62006-06-26 00:27:58 -07004479 break;
4480 case suspended:
4481 break; /* not supported yet */
4482 case readonly:
4483 if (mddev->pers)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004484 err = md_set_readonly(mddev, NULL);
NeilBrown9e653b62006-06-26 00:27:58 -07004485 else {
4486 mddev->ro = 1;
NeilBrown648b6292008-04-30 00:52:30 -07004487 set_disk_ro(mddev->gendisk, 1);
NeilBrown9e653b62006-06-26 00:27:58 -07004488 err = do_md_run(mddev);
4489 }
4490 break;
4491 case read_auto:
NeilBrown9e653b62006-06-26 00:27:58 -07004492 if (mddev->pers) {
NeilBrown80268ee2008-10-13 11:55:12 +11004493 if (mddev->ro == 0)
NeilBrowna05b7ea2012-07-19 15:59:18 +10004494 err = md_set_readonly(mddev, NULL);
NeilBrown80268ee2008-10-13 11:55:12 +11004495 else if (mddev->ro == 1)
NeilBrown648b6292008-04-30 00:52:30 -07004496 err = restart_array(mddev);
4497 if (err == 0) {
4498 mddev->ro = 2;
4499 set_disk_ro(mddev->gendisk, 0);
4500 }
NeilBrown9e653b62006-06-26 00:27:58 -07004501 } else {
4502 mddev->ro = 2;
4503 err = do_md_run(mddev);
4504 }
4505 break;
4506 case clean:
4507 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004508 err = restart_array(mddev);
4509 if (err)
4510 break;
NeilBrown85572d72014-12-15 12:56:56 +11004511 spin_lock(&mddev->lock);
NeilBrown6497709b2017-03-15 14:05:14 +11004512 if (!set_in_sync(mddev))
NeilBrowne6910632008-02-06 01:39:51 -08004513 err = -EBUSY;
NeilBrown85572d72014-12-15 12:56:56 +11004514 spin_unlock(&mddev->lock);
NeilBrown5bf29592009-05-07 12:50:57 +10004515 } else
4516 err = -EINVAL;
NeilBrown9e653b62006-06-26 00:27:58 -07004517 break;
4518 case active:
4519 if (mddev->pers) {
Song Liu339421d2015-10-08 21:54:13 -07004520 err = restart_array(mddev);
4521 if (err)
4522 break;
Shaohua Li29530792016-12-08 15:48:19 -08004523 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown9e653b62006-06-26 00:27:58 -07004524 wake_up(&mddev->sb_wait);
4525 err = 0;
4526 } else {
4527 mddev->ro = 0;
NeilBrown648b6292008-04-30 00:52:30 -07004528 set_disk_ro(mddev->gendisk, 0);
NeilBrown9e653b62006-06-26 00:27:58 -07004529 err = do_md_run(mddev);
4530 }
4531 break;
4532 case write_pending:
4533 case active_idle:
Guilherme G. Piccoli62f7b192019-09-03 16:49:00 -03004534 case broken:
NeilBrown9e653b62006-06-26 00:27:58 -07004535 /* these cannot be set */
4536 break;
4537 }
NeilBrown67918752014-12-15 12:57:01 +11004538
4539 if (!err) {
NeilBrown1d23f172011-12-08 15:49:12 +11004540 if (mddev->hold_active == UNTIL_IOCTL)
4541 mddev->hold_active = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10004542 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10004543 }
NeilBrown67918752014-12-15 12:57:01 +11004544 mddev_unlock(mddev);
4545 return err ?: len;
NeilBrown9e653b62006-06-26 00:27:58 -07004546}
NeilBrown80ca3a42006-07-10 04:44:18 -07004547static struct md_sysfs_entry md_array_state =
NeilBrown750f1992014-09-30 08:53:05 +10004548__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
NeilBrown9e653b62006-06-26 00:27:58 -07004549
NeilBrown6d7ff7382006-01-06 00:21:16 -08004550static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004551max_corrected_read_errors_show(struct mddev *mddev, char *page) {
Robert Becker1e509152009-12-14 12:49:58 +11004552 return sprintf(page, "%d\n",
4553 atomic_read(&mddev->max_corr_read_errors));
4554}
4555
4556static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004557max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
Robert Becker1e509152009-12-14 12:49:58 +11004558{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004559 unsigned int n;
4560 int rv;
Robert Becker1e509152009-12-14 12:49:58 +11004561
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004562 rv = kstrtouint(buf, 10, &n);
4563 if (rv < 0)
4564 return rv;
4565 atomic_set(&mddev->max_corr_read_errors, n);
4566 return len;
Robert Becker1e509152009-12-14 12:49:58 +11004567}
4568
4569static struct md_sysfs_entry max_corr_read_errors =
4570__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4571 max_corrected_read_errors_store);
4572
4573static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004574null_show(struct mddev *mddev, char *page)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004575{
4576 return -EINVAL;
4577}
4578
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004579/* need to ensure rdev_delayed_delete() has completed */
4580static void flush_rdev_wq(struct mddev *mddev)
4581{
4582 struct md_rdev *rdev;
4583
4584 rcu_read_lock();
4585 rdev_for_each_rcu(rdev, mddev)
4586 if (work_pending(&rdev->del_work)) {
4587 flush_workqueue(md_rdev_misc_wq);
4588 break;
4589 }
4590 rcu_read_unlock();
4591}
4592
NeilBrown6d7ff7382006-01-06 00:21:16 -08004593static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004594new_dev_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown6d7ff7382006-01-06 00:21:16 -08004595{
 4596 /* buf must be %d:%d with an optional trailing newline, giving major and minor numbers */
4597 /* The new device is added to the array.
4598 * If the array has a persistent superblock, we read the
4599 * superblock to initialise info and check validity.
 4600 * Otherwise, the only checking done is that in bind_rdev_to_array,
4601 * which mainly checks size.
4602 */
4603 char *e;
4604 int major = simple_strtoul(buf, &e, 10);
4605 int minor;
4606 dev_t dev;
NeilBrown3cb03002011-10-11 16:45:26 +11004607 struct md_rdev *rdev;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004608 int err;
4609
4610 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4611 return -EINVAL;
4612 minor = simple_strtoul(e+1, &e, 10);
4613 if (*e && *e != '\n')
4614 return -EINVAL;
4615 dev = MKDEV(major, minor);
4616 if (major != MAJOR(dev) ||
4617 minor != MINOR(dev))
4618 return -EOVERFLOW;
4619
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004620 flush_rdev_wq(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004621 err = mddev_lock(mddev);
4622 if (err)
4623 return err;
NeilBrown6d7ff7382006-01-06 00:21:16 -08004624 if (mddev->persistent) {
4625 rdev = md_import_device(dev, mddev->major_version,
4626 mddev->minor_version);
4627 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11004628 struct md_rdev *rdev0
4629 = list_entry(mddev->disks.next,
4630 struct md_rdev, same_set);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004631 err = super_types[mddev->major_version]
4632 .load_super(rdev, rdev0, mddev->minor_version);
4633 if (err < 0)
4634 goto out;
4635 }
NeilBrownc5d79ad2008-02-06 01:39:54 -08004636 } else if (mddev->external)
4637 rdev = md_import_device(dev, -2, -1);
4638 else
NeilBrown6d7ff7382006-01-06 00:21:16 -08004639 rdev = md_import_device(dev, -1, -1);
4640
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004641 if (IS_ERR(rdev)) {
4642 mddev_unlock(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004643 return PTR_ERR(rdev);
NeilBrown9a8c0fa2015-06-25 17:06:40 +10004644 }
NeilBrown6d7ff7382006-01-06 00:21:16 -08004645 err = bind_rdev_to_array(rdev, mddev);
4646 out:
4647 if (err)
4648 export_rdev(rdev);
NeilBrown67918752014-12-15 12:57:01 +11004649 mddev_unlock(mddev);
Alexey Obitotskiy5492c462017-07-28 15:49:25 +02004650 if (!err)
4651 md_new_event(mddev);
NeilBrown6d7ff7382006-01-06 00:21:16 -08004652 return err ? err : len;
4653}
4654
4655static struct md_sysfs_entry md_new_device =
NeilBrown80ca3a42006-07-10 04:44:18 -07004656__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
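/*
 * Illustrative usage, not part of the original source: new_dev takes a
 * "major:minor" pair and hot-adds that block device to the array, e.g.:
 *	echo "8:16" > /sys/block/md0/md/new_dev
 * 8:16 conventionally maps to /dev/sdb; both the device numbers and the
 * md0 name are assumptions for the example.
 */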
NeilBrown3b343802006-01-06 00:20:47 -08004657
4658static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004659bitmap_store(struct mddev *mddev, const char *buf, size_t len)
Paul Clements9b1d1da2006-10-03 01:15:49 -07004660{
4661 char *end;
4662 unsigned long chunk, end_chunk;
NeilBrown67918752014-12-15 12:57:01 +11004663 int err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004664
NeilBrown67918752014-12-15 12:57:01 +11004665 err = mddev_lock(mddev);
4666 if (err)
4667 return err;
Paul Clements9b1d1da2006-10-03 01:15:49 -07004668 if (!mddev->bitmap)
4669 goto out;
4670 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4671 while (*buf) {
4672 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4673 if (buf == end) break;
4674 if (*end == '-') { /* range */
4675 buf = end + 1;
4676 end_chunk = simple_strtoul(buf, &end, 0);
4677 if (buf == end) break;
4678 }
4679 if (*end && !isspace(*end)) break;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004680 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
André Goddard Rosae7d28602009-12-14 18:01:06 -08004681 buf = skip_spaces(end);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004682 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07004683 md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
Paul Clements9b1d1da2006-10-03 01:15:49 -07004684out:
NeilBrown67918752014-12-15 12:57:01 +11004685 mddev_unlock(mddev);
Paul Clements9b1d1da2006-10-03 01:15:49 -07004686 return len;
4687}
4688
4689static struct md_sysfs_entry md_bitmap =
4690__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
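/*
 * Illustrative usage, not part of the original source: bitmap_set_bits is
 * write-only and marks bitmap chunks dirty, singly or as ranges, e.g.
 * (paths assumed):
 *	echo "100-200 250" > /sys/block/md0/md/bitmap_set_bits
 */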
4691
4692static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004693size_show(struct mddev *mddev, char *page)
NeilBrowna35b0d62006-01-06 00:20:49 -08004694{
Andre Noll58c0fed2009-03-31 14:33:13 +11004695 return sprintf(page, "%llu\n",
4696 (unsigned long long)mddev->dev_sectors / 2);
NeilBrowna35b0d62006-01-06 00:20:49 -08004697}
4698
NeilBrownfd01b882011-10-11 16:47:53 +11004699static int update_size(struct mddev *mddev, sector_t num_sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004700
4701static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004702size_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowna35b0d62006-01-06 00:20:49 -08004703{
4704 /* If array is inactive, we can reduce the component size, but
4705 * not increase it (except from 0).
4706 * If array is active, we can try an on-line resize
4707 */
Dan Williamsb522adc2009-03-31 15:00:31 +11004708 sector_t sectors;
4709 int err = strict_blocks_to_sectors(buf, &sectors);
NeilBrowna35b0d62006-01-06 00:20:49 -08004710
Andre Noll58c0fed2009-03-31 14:33:13 +11004711 if (err < 0)
4712 return err;
NeilBrown67918752014-12-15 12:57:01 +11004713 err = mddev_lock(mddev);
4714 if (err)
4715 return err;
NeilBrowna35b0d62006-01-06 00:20:49 -08004716 if (mddev->pers) {
Andre Noll58c0fed2009-03-31 14:33:13 +11004717 err = update_size(mddev, sectors);
Xiao Ni4ba1e782016-06-12 17:18:00 +08004718 if (err == 0)
4719 md_update_sb(mddev, 1);
NeilBrowna35b0d62006-01-06 00:20:49 -08004720 } else {
Andre Noll58c0fed2009-03-31 14:33:13 +11004721 if (mddev->dev_sectors == 0 ||
4722 mddev->dev_sectors > sectors)
4723 mddev->dev_sectors = sectors;
NeilBrowna35b0d62006-01-06 00:20:49 -08004724 else
4725 err = -ENOSPC;
4726 }
NeilBrown67918752014-12-15 12:57:01 +11004727 mddev_unlock(mddev);
NeilBrowna35b0d62006-01-06 00:20:49 -08004728 return err ? err : len;
4729}
4730
4731static struct md_sysfs_entry md_size =
NeilBrown80ca3a42006-07-10 04:44:18 -07004732__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
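/*
 * Illustrative usage, not part of the original source: component_size reads
 * back in KiB (dev_sectors / 2) and accepts a per-device size in KiB; on a
 * running array this attempts an online resize.  Example (md0 assumed):
 *	echo 4194304 > /sys/block/md0/md/component_size
 */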
NeilBrowna35b0d62006-01-06 00:20:49 -08004733
Masanari Iida83f0d772012-10-30 00:18:08 +09004734/* Metadata version.
NeilBrowne6910632008-02-06 01:39:51 -08004735 * This is one of
4736 * 'none' for arrays with no metadata (good luck...)
4737 * 'external' for arrays with externally managed metadata,
NeilBrown8bb93aa2006-01-06 00:20:50 -08004738 * or N.M for internally known formats
4739 */
4740static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004741metadata_show(struct mddev *mddev, char *page)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004742{
4743 if (mddev->persistent)
4744 return sprintf(page, "%d.%d\n",
4745 mddev->major_version, mddev->minor_version);
NeilBrowne6910632008-02-06 01:39:51 -08004746 else if (mddev->external)
4747 return sprintf(page, "external:%s\n", mddev->metadata_type);
NeilBrown8bb93aa2006-01-06 00:20:50 -08004748 else
4749 return sprintf(page, "none\n");
4750}
4751
4752static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004753metadata_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown8bb93aa2006-01-06 00:20:50 -08004754{
4755 int major, minor;
4756 char *e;
NeilBrown67918752014-12-15 12:57:01 +11004757 int err;
NeilBrownea43ddd2008-10-13 11:55:11 +11004758 /* Changing the details of 'external' metadata is
4759 * always permitted. Otherwise there must be
4760 * no devices attached to the array.
4761 */
NeilBrown67918752014-12-15 12:57:01 +11004762
4763 err = mddev_lock(mddev);
4764 if (err)
4765 return err;
4766 err = -EBUSY;
NeilBrownea43ddd2008-10-13 11:55:11 +11004767 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4768 ;
4769 else if (!list_empty(&mddev->disks))
NeilBrown67918752014-12-15 12:57:01 +11004770 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004771
NeilBrown67918752014-12-15 12:57:01 +11004772 err = 0;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004773 if (cmd_match(buf, "none")) {
4774 mddev->persistent = 0;
NeilBrowne6910632008-02-06 01:39:51 -08004775 mddev->external = 0;
4776 mddev->major_version = 0;
4777 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004778 goto out_unlock;
NeilBrowne6910632008-02-06 01:39:51 -08004779 }
4780 if (strncmp(buf, "external:", 9) == 0) {
NeilBrown20a49ff2008-02-06 01:39:57 -08004781 size_t namelen = len-9;
NeilBrowne6910632008-02-06 01:39:51 -08004782 if (namelen >= sizeof(mddev->metadata_type))
4783 namelen = sizeof(mddev->metadata_type)-1;
4784 strncpy(mddev->metadata_type, buf+9, namelen);
4785 mddev->metadata_type[namelen] = 0;
4786 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4787 mddev->metadata_type[--namelen] = 0;
4788 mddev->persistent = 0;
4789 mddev->external = 1;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004790 mddev->major_version = 0;
4791 mddev->minor_version = 90;
NeilBrown67918752014-12-15 12:57:01 +11004792 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004793 }
4794 major = simple_strtoul(buf, &e, 10);
NeilBrown67918752014-12-15 12:57:01 +11004795 err = -EINVAL;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004796 if (e==buf || *e != '.')
NeilBrown67918752014-12-15 12:57:01 +11004797 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004798 buf = e+1;
4799 minor = simple_strtoul(buf, &e, 10);
NeilBrown3f9d7b02006-12-22 01:11:41 -08004800 if (e==buf || (*e && *e != '\n') )
NeilBrown67918752014-12-15 12:57:01 +11004801 goto out_unlock;
4802 err = -ENOENT;
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07004803 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
NeilBrown67918752014-12-15 12:57:01 +11004804 goto out_unlock;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004805 mddev->major_version = major;
4806 mddev->minor_version = minor;
4807 mddev->persistent = 1;
NeilBrowne6910632008-02-06 01:39:51 -08004808 mddev->external = 0;
NeilBrown67918752014-12-15 12:57:01 +11004809 err = 0;
4810out_unlock:
4811 mddev_unlock(mddev);
4812 return err ?: len;
NeilBrown8bb93aa2006-01-06 00:20:50 -08004813}
4814
4815static struct md_sysfs_entry md_metadata =
NeilBrown750f1992014-09-30 08:53:05 +10004816__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
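/*
 * Illustrative usage, not part of the original source: metadata_version
 * accepts "none", "external:<name>", or a known "major.minor" format, and
 * (except for external updates) only while no devices are attached, e.g.:
 *	echo 1.2 > /sys/block/md0/md/metadata_version
 * The md0 path is an assumption for the example.
 */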
NeilBrown8bb93aa2006-01-06 00:20:50 -08004817
NeilBrowna35b0d62006-01-06 00:20:49 -08004818static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004819action_show(struct mddev *mddev, char *page)
NeilBrown24dd4692005-11-08 21:39:26 -08004820{
NeilBrown7eec3142005-11-08 21:39:44 -08004821 char *type = "idle";
NeilBrownb7b17c92014-12-15 12:56:59 +11004822 unsigned long recovery = mddev->recovery;
4823 if (test_bit(MD_RECOVERY_FROZEN, &recovery))
NeilBrownb6a9ce62009-05-26 09:41:17 +10004824 type = "frozen";
NeilBrownb7b17c92014-12-15 12:56:59 +11004825 else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4826 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4827 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
NeilBrownccfcc3c2006-03-27 01:18:09 -08004828 type = "reshape";
NeilBrownb7b17c92014-12-15 12:56:59 +11004829 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4830 if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004831 type = "resync";
NeilBrownb7b17c92014-12-15 12:56:59 +11004832 else if (test_bit(MD_RECOVERY_CHECK, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004833 type = "check";
4834 else
4835 type = "repair";
NeilBrownb7b17c92014-12-15 12:56:59 +11004836 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004837 type = "recover";
NeilBrown985ca972015-07-06 12:26:57 +10004838 else if (mddev->reshape_position != MaxSector)
4839 type = "reshape";
NeilBrown24dd4692005-11-08 21:39:26 -08004840 }
4841 return sprintf(page, "%s\n", type);
4842}
4843
4844static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004845action_store(struct mddev *mddev, const char *page, size_t len)
NeilBrown24dd4692005-11-08 21:39:26 -08004846{
NeilBrown7eec3142005-11-08 21:39:44 -08004847 if (!mddev->pers || !mddev->pers->sync_request)
4848 return -EINVAL;
4849
NeilBrownb6a9ce62009-05-26 09:41:17 +10004850
4851 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004852 if (cmd_match(page, "frozen"))
4853 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4854 else
4855 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown8e8e2512015-06-12 19:51:27 +10004856 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4857 mddev_lock(mddev) == 0) {
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02004858 if (work_pending(&mddev->del_work))
4859 flush_workqueue(md_misc_wq);
NeilBrown8e8e2512015-06-12 19:51:27 +10004860 if (mddev->sync_thread) {
4861 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown67918752014-12-15 12:57:01 +11004862 md_reap_sync_thread(mddev);
NeilBrown67918752014-12-15 12:57:01 +11004863 }
NeilBrown8e8e2512015-06-12 19:51:27 +10004864 mddev_unlock(mddev);
NeilBrown7eec3142005-11-08 21:39:44 -08004865 }
NeilBrown312045e2015-12-21 11:01:21 +11004866 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown24dd4692005-11-08 21:39:26 -08004867 return -EBUSY;
Neil Brown72a23c22008-06-28 08:31:41 +10004868 else if (cmd_match(page, "resync"))
NeilBrown56ccc112015-05-28 17:53:29 +10004869 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004870 else if (cmd_match(page, "recover")) {
NeilBrown56ccc112015-05-28 17:53:29 +10004871 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004872 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Neil Brown72a23c22008-06-28 08:31:41 +10004873 } else if (cmd_match(page, "reshape")) {
NeilBrown16484bf2006-03-27 01:18:13 -08004874 int err;
4875 if (mddev->pers->start_reshape == NULL)
4876 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11004877 err = mddev_lock(mddev);
4878 if (!err) {
NeilBrown312045e2015-12-21 11:01:21 +11004879 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4880 err = -EBUSY;
4881 else {
4882 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4883 err = mddev->pers->start_reshape(mddev);
4884 }
NeilBrown67918752014-12-15 12:57:01 +11004885 mddev_unlock(mddev);
4886 }
NeilBrown16484bf2006-03-27 01:18:13 -08004887 if (err)
4888 return err;
Junxiao Bie1a86db2020-07-14 16:10:26 -07004889 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrown16484bf2006-03-27 01:18:13 -08004890 } else {
NeilBrownbce74da2006-01-06 00:20:41 -08004891 if (cmd_match(page, "check"))
NeilBrown7eec3142005-11-08 21:39:44 -08004892 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
NeilBrown2adc7d42006-05-20 14:59:57 -07004893 else if (!cmd_match(page, "repair"))
NeilBrown7eec3142005-11-08 21:39:44 -08004894 return -EINVAL;
NeilBrown56ccc112015-05-28 17:53:29 +10004895 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004896 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4897 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
NeilBrown7eec3142005-11-08 21:39:44 -08004898 }
NeilBrown48c26dd2012-10-11 14:19:39 +11004899 if (mddev->ro == 2) {
4900 /* A write to sync_action is enough to justify
4901 * canceling read-auto mode
4902 */
4903 mddev->ro = 0;
4904 md_wakeup_thread(mddev->sync_thread);
4905 }
NeilBrown03c902e2006-01-06 00:20:46 -08004906 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown24dd4692005-11-08 21:39:26 -08004907 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10004908 sysfs_notify_dirent_safe(mddev->sysfs_action);
NeilBrown24dd4692005-11-08 21:39:26 -08004909 return len;
4910}
4911
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004912static struct md_sysfs_entry md_scan_mode =
NeilBrown750f1992014-09-30 08:53:05 +10004913__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
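/*
 * Illustrative usage, not part of the original source: sync_action accepts
 * "idle", "frozen", "resync", "recover", "check", "repair" and "reshape".
 * A typical scrub on an assumed md0 array:
 *	echo check > /sys/block/md0/md/sync_action
 *	cat /sys/block/md0/md/mismatch_cnt
 */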
Jonathan Brassowc4a39552013-06-25 01:23:59 -05004914
4915static ssize_t
4916last_sync_action_show(struct mddev *mddev, char *page)
4917{
4918 return sprintf(page, "%s\n", mddev->last_sync_action);
4919}
4920
4921static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4922
NeilBrown9d888832005-11-08 21:39:26 -08004923static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004924mismatch_cnt_show(struct mddev *mddev, char *page)
NeilBrown9d888832005-11-08 21:39:26 -08004925{
4926 return sprintf(page, "%llu\n",
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11004927 (unsigned long long)
4928 atomic64_read(&mddev->resync_mismatches));
NeilBrown9d888832005-11-08 21:39:26 -08004929}
4930
NeilBrown80ca3a42006-07-10 04:44:18 -07004931static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
NeilBrown9d888832005-11-08 21:39:26 -08004932
NeilBrown88202a02006-01-06 00:21:36 -08004933static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004934sync_min_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004935{
4936 return sprintf(page, "%d (%s)\n", speed_min(mddev),
4937 mddev->sync_speed_min ? "local": "system");
4938}
4939
4940static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004941sync_min_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004942{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004943 unsigned int min;
4944 int rv;
4945
NeilBrown88202a02006-01-06 00:21:36 -08004946 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004947 min = 0;
4948 } else {
4949 rv = kstrtouint(buf, 10, &min);
4950 if (rv < 0)
4951 return rv;
4952 if (min == 0)
4953 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004954 }
NeilBrown88202a02006-01-06 00:21:36 -08004955 mddev->sync_speed_min = min;
4956 return len;
4957}
4958
4959static struct md_sysfs_entry md_sync_min =
4960__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4961
4962static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004963sync_max_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08004964{
4965 return sprintf(page, "%d (%s)\n", speed_max(mddev),
4966 mddev->sync_speed_max ? "local": "system");
4967}
4968
4969static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004970sync_max_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown88202a02006-01-06 00:21:36 -08004971{
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004972 unsigned int max;
4973 int rv;
4974
NeilBrown88202a02006-01-06 00:21:36 -08004975 if (strncmp(buf, "system", 6)==0) {
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03004976 max = 0;
4977 } else {
4978 rv = kstrtouint(buf, 10, &max);
4979 if (rv < 0)
4980 return rv;
4981 if (max == 0)
4982 return -EINVAL;
NeilBrown88202a02006-01-06 00:21:36 -08004983 }
NeilBrown88202a02006-01-06 00:21:36 -08004984 mddev->sync_speed_max = max;
4985 return len;
4986}
4987
4988static struct md_sysfs_entry md_sync_max =
4989__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
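/*
 * Illustrative usage, not part of the original source: sync_speed_min and
 * sync_speed_max take a per-array rate in KiB/s, or "system" to fall back
 * to the global limits, e.g. (md0 assumed):
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 *	echo system > /sys/block/md0/md/sync_speed_max
 */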
4990
Iustin Popd7f3d292007-10-16 23:30:54 -07004991static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004992degraded_show(struct mddev *mddev, char *page)
Iustin Popd7f3d292007-10-16 23:30:54 -07004993{
4994 return sprintf(page, "%d\n", mddev->degraded);
4995}
4996static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
NeilBrown88202a02006-01-06 00:21:36 -08004997
4998static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11004999sync_force_parallel_show(struct mddev *mddev, char *page)
Bernd Schubert90b08712008-05-23 13:04:38 -07005000{
5001 return sprintf(page, "%d\n", mddev->parallel_resync);
5002}
5003
5004static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005005sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
Bernd Schubert90b08712008-05-23 13:04:38 -07005006{
5007 long n;
5008
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005009 if (kstrtol(buf, 10, &n))
Bernd Schubert90b08712008-05-23 13:04:38 -07005010 return -EINVAL;
5011
5012 if (n != 0 && n != 1)
5013 return -EINVAL;
5014
5015 mddev->parallel_resync = n;
5016
5017 if (mddev->sync_thread)
5018 wake_up(&resync_wait);
5019
5020 return len;
5021}
5022
5023/* force parallel resync, even with shared block devices */
5024static struct md_sysfs_entry md_sync_force_parallel =
5025__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
5026 sync_force_parallel_show, sync_force_parallel_store);
5027
5028static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005029sync_speed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005030{
5031 unsigned long resync, dt, db;
NeilBrownd1a7c502009-03-31 15:24:32 +11005032 if (mddev->curr_resync == 0)
5033 return sprintf(page, "none\n");
Andre Noll9687a602008-03-25 22:24:09 +01005034 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
5035 dt = (jiffies - mddev->resync_mark) / HZ;
NeilBrown88202a02006-01-06 00:21:36 -08005036 if (!dt) dt++;
Andre Noll9687a602008-03-25 22:24:09 +01005037 db = resync - mddev->resync_mark_cnt;
5038 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
NeilBrown88202a02006-01-06 00:21:36 -08005039}
5040
NeilBrown80ca3a42006-07-10 04:44:18 -07005041static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
NeilBrown88202a02006-01-06 00:21:36 -08005042
5043static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005044sync_completed_show(struct mddev *mddev, char *page)
NeilBrown88202a02006-01-06 00:21:36 -08005045{
Rémi Rérolle13ae8642011-01-14 09:14:34 +11005046	unsigned long long max_sectors, resync;
NeilBrown88202a02006-01-06 00:21:36 -08005047
NeilBrownacb180b2009-04-14 16:28:34 +10005048 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5049 return sprintf(page, "none\n");
5050
NeilBrown72f36d52012-10-11 14:25:57 +11005051 if (mddev->curr_resync == 1 ||
5052 mddev->curr_resync == 2)
5053 return sprintf(page, "delayed\n");
5054
NeilBrownc804cde2012-05-21 09:28:33 +10005055 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5056 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
Andre Noll58c0fed2009-03-31 14:33:13 +11005057 max_sectors = mddev->resync_max_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005058 else
Andre Noll58c0fed2009-03-31 14:33:13 +11005059 max_sectors = mddev->dev_sectors;
NeilBrown88202a02006-01-06 00:21:36 -08005060
NeilBrownacb180b2009-04-14 16:28:34 +10005061 resync = mddev->curr_resync_completed;
Rémi Rérolle13ae8642011-01-14 09:14:34 +11005062	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
NeilBrown88202a02006-01-06 00:21:36 -08005063}
5064
NeilBrown750f1992014-09-30 08:53:05 +10005065static struct md_sysfs_entry md_sync_completed =
5066 __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
NeilBrown88202a02006-01-06 00:21:36 -08005067
NeilBrowne464eaf2006-03-27 01:18:14 -08005068static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005069min_sync_show(struct mddev *mddev, char *page)
Neil Brown5e96ee62008-06-28 08:31:24 +10005070{
5071 return sprintf(page, "%llu\n",
5072 (unsigned long long)mddev->resync_min);
5073}
5074static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005075min_sync_store(struct mddev *mddev, const char *buf, size_t len)
Neil Brown5e96ee62008-06-28 08:31:24 +10005076{
5077 unsigned long long min;
NeilBrown23da4222014-12-15 12:57:01 +11005078 int err;
NeilBrown23da4222014-12-15 12:57:01 +11005079
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005080 if (kstrtoull(buf, 10, &min))
Neil Brown5e96ee62008-06-28 08:31:24 +10005081 return -EINVAL;
NeilBrown23da4222014-12-15 12:57:01 +11005082
5083 spin_lock(&mddev->lock);
5084 err = -EINVAL;
Neil Brown5e96ee62008-06-28 08:31:24 +10005085 if (min > mddev->resync_max)
NeilBrown23da4222014-12-15 12:57:01 +11005086 goto out_unlock;
5087
5088 err = -EBUSY;
Neil Brown5e96ee62008-06-28 08:31:24 +10005089 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005090 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005091
NeilBrown50c37b12015-03-23 17:36:38 +11005092 /* Round down to multiple of 4K for safety */
5093 mddev->resync_min = round_down(min, 8);
NeilBrown23da4222014-12-15 12:57:01 +11005094 err = 0;
Neil Brown5e96ee62008-06-28 08:31:24 +10005095
NeilBrown23da4222014-12-15 12:57:01 +11005096out_unlock:
5097 spin_unlock(&mddev->lock);
5098 return err ?: len;
Neil Brown5e96ee62008-06-28 08:31:24 +10005099}
5100
5101static struct md_sysfs_entry md_min_sync =
5102__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5103
5104static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005105max_sync_show(struct mddev *mddev, char *page)
NeilBrownc6207272008-02-06 01:39:52 -08005106{
5107 if (mddev->resync_max == MaxSector)
5108 return sprintf(page, "max\n");
5109 else
5110 return sprintf(page, "%llu\n",
5111 (unsigned long long)mddev->resync_max);
5112}
5113static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005114max_sync_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownc6207272008-02-06 01:39:52 -08005115{
NeilBrown23da4222014-12-15 12:57:01 +11005116 int err;
5117 spin_lock(&mddev->lock);
NeilBrownc6207272008-02-06 01:39:52 -08005118 if (strncmp(buf, "max", 3) == 0)
5119 mddev->resync_max = MaxSector;
5120 else {
Neil Brown5e96ee62008-06-28 08:31:24 +10005121 unsigned long long max;
NeilBrown23da4222014-12-15 12:57:01 +11005122 int chunk;
5123
5124 err = -EINVAL;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09005125 if (kstrtoull(buf, 10, &max))
NeilBrown23da4222014-12-15 12:57:01 +11005126 goto out_unlock;
Neil Brown5e96ee62008-06-28 08:31:24 +10005127 if (max < mddev->resync_min)
NeilBrown23da4222014-12-15 12:57:01 +11005128 goto out_unlock;
5129
5130 err = -EBUSY;
NeilBrownc6207272008-02-06 01:39:52 -08005131 if (max < mddev->resync_max &&
NeilBrown4d484a42009-08-13 10:41:50 +10005132 mddev->ro == 0 &&
NeilBrownc6207272008-02-06 01:39:52 -08005133 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown23da4222014-12-15 12:57:01 +11005134 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005135
5136 /* Must be a multiple of chunk_size */
NeilBrown23da4222014-12-15 12:57:01 +11005137 chunk = mddev->chunk_sectors;
5138 if (chunk) {
raz ben yehuda2ac06c32009-06-16 17:01:42 +10005139 sector_t temp = max;
NeilBrown23da4222014-12-15 12:57:01 +11005140
5141 err = -EINVAL;
5142 if (sector_div(temp, chunk))
5143 goto out_unlock;
NeilBrownc6207272008-02-06 01:39:52 -08005144 }
5145 mddev->resync_max = max;
5146 }
5147 wake_up(&mddev->recovery_wait);
NeilBrown23da4222014-12-15 12:57:01 +11005148 err = 0;
5149out_unlock:
5150 spin_unlock(&mddev->lock);
5151 return err ?: len;
NeilBrownc6207272008-02-06 01:39:52 -08005152}
5153
5154static struct md_sysfs_entry md_max_sync =
5155__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
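/*
 * Illustrative usage, not part of the original source: sync_min and
 * sync_max bound the resync window in sectors; sync_min is rounded down to
 * a multiple of 8 sectors (4K), and sync_max must be a chunk multiple or
 * the keyword "max", e.g. (md0 assumed):
 *	echo 0 > /sys/block/md0/md/sync_min
 *	echo max > /sys/block/md0/md/sync_max
 */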
5156
5157static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005158suspend_lo_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005159{
5160 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5161}
5162
5163static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005164suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005165{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005166 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005167 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005168
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005169 err = kstrtoull(buf, 10, &new);
5170 if (err < 0)
5171 return err;
5172 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005173 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005174
NeilBrown67918752014-12-15 12:57:01 +11005175 err = mddev_lock(mddev);
5176 if (err)
5177 return err;
5178 err = -EINVAL;
5179 if (mddev->pers == NULL ||
5180 mddev->pers->quiesce == NULL)
5181 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005182 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005183 mddev->suspend_lo = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005184 mddev_resume(mddev);
5185
NeilBrown67918752014-12-15 12:57:01 +11005186 err = 0;
5187unlock:
5188 mddev_unlock(mddev);
5189 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005190}
5191static struct md_sysfs_entry md_suspend_lo =
5192__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5193
NeilBrowne464eaf2006-03-27 01:18:14 -08005194static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005195suspend_hi_show(struct mddev *mddev, char *page)
NeilBrowne464eaf2006-03-27 01:18:14 -08005196{
5197 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5198}
5199
5200static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005201suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrowne464eaf2006-03-27 01:18:14 -08005202{
NeilBrownb03e0cc2017-10-19 12:49:15 +11005203 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005204 int err;
NeilBrowne464eaf2006-03-27 01:18:14 -08005205
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005206 err = kstrtoull(buf, 10, &new);
5207 if (err < 0)
5208 return err;
5209 if (new != (sector_t)new)
NeilBrowne464eaf2006-03-27 01:18:14 -08005210 return -EINVAL;
NeilBrown23ddff32011-01-14 09:14:34 +11005211
NeilBrown67918752014-12-15 12:57:01 +11005212 err = mddev_lock(mddev);
5213 if (err)
5214 return err;
5215 err = -EINVAL;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005216 if (mddev->pers == NULL)
NeilBrown67918752014-12-15 12:57:01 +11005217 goto unlock;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005218
5219 mddev_suspend(mddev);
NeilBrown23ddff32011-01-14 09:14:34 +11005220 mddev->suspend_hi = new;
NeilBrownb03e0cc2017-10-19 12:49:15 +11005221 mddev_resume(mddev);
5222
NeilBrown67918752014-12-15 12:57:01 +11005223 err = 0;
5224unlock:
5225 mddev_unlock(mddev);
5226 return err ?: len;
NeilBrowne464eaf2006-03-27 01:18:14 -08005227}
5228static struct md_sysfs_entry md_suspend_hi =
5229__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
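/*
 * Illustrative usage, not part of the original source: suspend_lo and
 * suspend_hi describe a sector range in which I/O is held off; each write
 * suspends and resumes the array around the update, e.g. (values assumed):
 *	echo 0 > /sys/block/md0/md/suspend_lo
 *	echo 8192 > /sys/block/md0/md/suspend_hi
 */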
5230
NeilBrown08a02ec2007-05-09 02:35:38 -07005231static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005232reshape_position_show(struct mddev *mddev, char *page)
NeilBrown08a02ec2007-05-09 02:35:38 -07005233{
5234 if (mddev->reshape_position != MaxSector)
5235 return sprintf(page, "%llu\n",
5236 (unsigned long long)mddev->reshape_position);
5237 strcpy(page, "none\n");
5238 return 5;
5239}
5240
5241static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005242reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown08a02ec2007-05-09 02:35:38 -07005243{
NeilBrownc6563a82012-05-21 09:27:00 +10005244 struct md_rdev *rdev;
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005245 unsigned long long new;
NeilBrown67918752014-12-15 12:57:01 +11005246 int err;
NeilBrown67918752014-12-15 12:57:01 +11005247
Alexey Dobriyan4c9309c2015-05-16 14:02:38 +03005248 err = kstrtoull(buf, 10, &new);
5249 if (err < 0)
5250 return err;
5251 if (new != (sector_t)new)
NeilBrown08a02ec2007-05-09 02:35:38 -07005252 return -EINVAL;
NeilBrown67918752014-12-15 12:57:01 +11005253 err = mddev_lock(mddev);
5254 if (err)
5255 return err;
5256 err = -EBUSY;
5257 if (mddev->pers)
5258 goto unlock;
NeilBrown08a02ec2007-05-09 02:35:38 -07005259 mddev->reshape_position = new;
5260 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10005261 mddev->reshape_backwards = 0;
NeilBrown08a02ec2007-05-09 02:35:38 -07005262 mddev->new_level = mddev->level;
5263 mddev->new_layout = mddev->layout;
Andre Noll664e7c42009-06-18 08:45:27 +10005264 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownc6563a82012-05-21 09:27:00 +10005265 rdev_for_each(rdev, mddev)
5266 rdev->new_data_offset = rdev->data_offset;
NeilBrown67918752014-12-15 12:57:01 +11005267 err = 0;
5268unlock:
5269 mddev_unlock(mddev);
5270 return err ?: len;
NeilBrown08a02ec2007-05-09 02:35:38 -07005271}
5272
5273static struct md_sysfs_entry md_reshape_position =
5274__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5275 reshape_position_store);
5276
Dan Williamsb522adc2009-03-31 15:00:31 +11005277static ssize_t
NeilBrown2c810cd2012-05-21 09:27:00 +10005278reshape_direction_show(struct mddev *mddev, char *page)
5279{
5280 return sprintf(page, "%s\n",
5281 mddev->reshape_backwards ? "backwards" : "forwards");
5282}
5283
5284static ssize_t
5285reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5286{
5287 int backwards = 0;
NeilBrown67918752014-12-15 12:57:01 +11005288 int err;
5289
NeilBrown2c810cd2012-05-21 09:27:00 +10005290 if (cmd_match(buf, "forwards"))
5291 backwards = 0;
5292 else if (cmd_match(buf, "backwards"))
5293 backwards = 1;
5294 else
5295 return -EINVAL;
5296 if (mddev->reshape_backwards == backwards)
5297 return len;
5298
NeilBrown67918752014-12-15 12:57:01 +11005299 err = mddev_lock(mddev);
5300 if (err)
5301 return err;
NeilBrown2c810cd2012-05-21 09:27:00 +10005302 /* check if we are allowed to change */
5303 if (mddev->delta_disks)
NeilBrown67918752014-12-15 12:57:01 +11005304 err = -EBUSY;
5305 else if (mddev->persistent &&
NeilBrown2c810cd2012-05-21 09:27:00 +10005306 mddev->major_version == 0)
NeilBrown67918752014-12-15 12:57:01 +11005307 err = -EINVAL;
5308 else
5309 mddev->reshape_backwards = backwards;
5310 mddev_unlock(mddev);
5311 return err ?: len;
NeilBrown2c810cd2012-05-21 09:27:00 +10005312}
5313
5314static struct md_sysfs_entry md_reshape_direction =
5315__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5316 reshape_direction_store);
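/*
 * Illustrative usage, not part of the original source: reshape_position is
 * a sector offset (or "none") and may only be set on a stopped array, while
 * reshape_direction toggles between "forwards" and "backwards" when no
 * delta_disks change is pending, e.g. (md0 assumed):
 *	echo backwards > /sys/block/md0/md/reshape_direction
 */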
5317
5318static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005319array_size_show(struct mddev *mddev, char *page)
Dan Williamsb522adc2009-03-31 15:00:31 +11005320{
5321 if (mddev->external_size)
5322 return sprintf(page, "%llu\n",
5323 (unsigned long long)mddev->array_sectors/2);
5324 else
5325 return sprintf(page, "default\n");
5326}
5327
5328static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11005329array_size_store(struct mddev *mddev, const char *buf, size_t len)
Dan Williamsb522adc2009-03-31 15:00:31 +11005330{
5331 sector_t sectors;
NeilBrown67918752014-12-15 12:57:01 +11005332 int err;
5333
5334 err = mddev_lock(mddev);
5335 if (err)
5336 return err;
Dan Williamsb522adc2009-03-31 15:00:31 +11005337
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005338 /* cluster raid doesn't support change array_sectors */
Zhilong Liub6708832017-04-10 14:15:55 +08005339 if (mddev_is_clustered(mddev)) {
5340 mddev_unlock(mddev);
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005341 return -EINVAL;
Zhilong Liub6708832017-04-10 14:15:55 +08005342 }
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04005343
Dan Williamsb522adc2009-03-31 15:00:31 +11005344 if (strncmp(buf, "default", 7) == 0) {
5345 if (mddev->pers)
5346 sectors = mddev->pers->size(mddev, 0, 0);
5347 else
5348 sectors = mddev->array_sectors;
5349
5350 mddev->external_size = 0;
5351 } else {
5352 if (strict_blocks_to_sectors(buf, &sectors) < 0)
NeilBrown67918752014-12-15 12:57:01 +11005353 err = -EINVAL;
5354 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5355 err = -E2BIG;
5356 else
5357 mddev->external_size = 1;
Dan Williamsb522adc2009-03-31 15:00:31 +11005358 }
5359
NeilBrown67918752014-12-15 12:57:01 +11005360 if (!err) {
5361 mddev->array_sectors = sectors;
Christoph Hellwig2c247c52020-11-16 15:57:11 +01005362 if (mddev->pers)
5363 set_capacity_and_notify(mddev->gendisk,
5364 mddev->array_sectors);
NeilBrowncbe6ef12011-02-16 13:58:38 +11005365 }
NeilBrown67918752014-12-15 12:57:01 +11005366 mddev_unlock(mddev);
5367 return err ?: len;
Dan Williamsb522adc2009-03-31 15:00:31 +11005368}
5369
5370static struct md_sysfs_entry md_array_size =
5371__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5372 array_size_store);
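/*
 * Illustrative usage, not part of the original source: array_size accepts
 * "default" to let the personality compute the exported size, or an
 * explicit size in KiB that may not exceed what the personality reports,
 * e.g. (md0 assumed):
 *	echo default > /sys/block/md0/md/array_size
 */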
NeilBrowne464eaf2006-03-27 01:18:14 -08005373
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005374static ssize_t
5375consistency_policy_show(struct mddev *mddev, char *page)
5376{
5377 int ret;
5378
5379 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5380 ret = sprintf(page, "journal\n");
5381 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5382 ret = sprintf(page, "ppl\n");
5383 } else if (mddev->bitmap) {
5384 ret = sprintf(page, "bitmap\n");
5385 } else if (mddev->pers) {
5386 if (mddev->pers->sync_request)
5387 ret = sprintf(page, "resync\n");
5388 else
5389 ret = sprintf(page, "none\n");
5390 } else {
5391 ret = sprintf(page, "unknown\n");
5392 }
5393
5394 return ret;
5395}
5396
5397static ssize_t
5398consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5399{
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005400 int err = 0;
5401
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005402 if (mddev->pers) {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005403 if (mddev->pers->change_consistency_policy)
5404 err = mddev->pers->change_consistency_policy(mddev, buf);
5405 else
5406 err = -EBUSY;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005407 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5408 set_bit(MD_HAS_PPL, &mddev->flags);
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005409 } else {
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005410 err = -EINVAL;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005411 }
Artur Paszkiewiczba903a32017-03-09 10:00:03 +01005412
5413 return err ? err : len;
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005414}
5415
5416static struct md_sysfs_entry md_consistency_policy =
5417__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5418 consistency_policy_store);
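/*
 * Illustrative usage, not part of the original source: consistency_policy
 * reports journal/ppl/bitmap/resync/none and can, for example, enable PPL
 * on an externally managed array before it is started:
 *	echo ppl > /sys/block/md0/md/consistency_policy
 * (md0 assumed; running arrays go through ->change_consistency_policy().)
 */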
5419
Guoqing Jiang9a567842019-07-24 11:09:19 +02005420static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5421{
5422 return sprintf(page, "%d\n", mddev->fail_last_dev);
5423}
5424
5425/*
5426 * Setting fail_last_dev to true allows the last device to be forcibly
5427 * removed from RAID1/RAID10.
5428 */
5429static ssize_t
5430fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5431{
5432 int ret;
5433 bool value;
5434
5435 ret = kstrtobool(buf, &value);
5436 if (ret)
5437 return ret;
5438
5439 if (value != mddev->fail_last_dev)
5440 mddev->fail_last_dev = value;
5441
5442 return len;
5443}
5444static struct md_sysfs_entry md_fail_last_dev =
5445__ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5446 fail_last_dev_store);
5447
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005448static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5449{
5450 if (mddev->pers == NULL || (mddev->pers->level != 1))
5451 return sprintf(page, "n/a\n");
5452 else
5453 return sprintf(page, "%d\n", mddev->serialize_policy);
5454}
5455
5456/*
5457 * Setting serialize_policy to true enforces that write IO is not
5458 * reordered for raid1.
5459 */
5460static ssize_t
5461serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5462{
5463 int err;
5464 bool value;
5465
5466 err = kstrtobool(buf, &value);
5467 if (err)
5468 return err;
5469
5470 if (value == mddev->serialize_policy)
5471 return len;
5472
5473 err = mddev_lock(mddev);
5474 if (err)
5475 return err;
5476 if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5477 pr_err("md: serialize_policy is only effective for raid1\n");
5478 err = -EINVAL;
5479 goto unlock;
5480 }
5481
5482 mddev_suspend(mddev);
5483 if (value)
5484 mddev_create_serial_pool(mddev, NULL, true);
5485 else
5486 mddev_destroy_serial_pool(mddev, NULL, true);
5487 mddev->serialize_policy = value;
5488 mddev_resume(mddev);
5489unlock:
5490 mddev_unlock(mddev);
5491 return err ?: len;
5492}
5493
5494static struct md_sysfs_entry md_serialize_policy =
5495__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5496 serialize_policy_store);
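/*
 * Illustrative usage, not part of the original source: serialize_policy is
 * a boolean and only meaningful for raid1, e.g. (md0 assumed):
 *	echo 1 > /sys/block/md0/md/serialize_policy
 * Enabling it creates the serial pool so overlapping writes are kept in
 * order.
 */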
5497
5498
NeilBrowneae17012005-11-08 21:39:23 -08005499static struct attribute *md_default_attrs[] = {
5500 &md_level.attr,
NeilBrownd4dbd022006-06-26 00:27:59 -07005501 &md_layout.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005502 &md_raid_disks.attr,
Sebastian Parschauerec164d072020-07-28 12:01:39 +02005503 &md_uuid.attr,
NeilBrown3b343802006-01-06 00:20:47 -08005504 &md_chunk_size.attr,
NeilBrowna35b0d62006-01-06 00:20:49 -08005505 &md_size.attr,
NeilBrowna94213b2006-06-26 00:28:00 -07005506 &md_resync_start.attr,
NeilBrown8bb93aa2006-01-06 00:20:50 -08005507 &md_metadata.attr,
NeilBrown6d7ff7382006-01-06 00:21:16 -08005508 &md_new_device.attr,
NeilBrown16f17b32006-06-26 00:27:37 -07005509 &md_safe_delay.attr,
NeilBrown9e653b62006-06-26 00:27:58 -07005510 &md_array_state.attr,
NeilBrown08a02ec2007-05-09 02:35:38 -07005511 &md_reshape_position.attr,
NeilBrown2c810cd2012-05-21 09:27:00 +10005512 &md_reshape_direction.attr,
Dan Williamsb522adc2009-03-31 15:00:31 +11005513 &md_array_size.attr,
Robert Becker1e509152009-12-14 12:49:58 +11005514 &max_corr_read_errors.attr,
Artur Paszkiewicz664aed02017-03-09 10:00:00 +01005515 &md_consistency_policy.attr,
Guoqing Jiang9a567842019-07-24 11:09:19 +02005516 &md_fail_last_dev.attr,
Guoqing Jiang3938f5f2019-12-23 10:48:56 +01005517 &md_serialize_policy.attr,
NeilBrown411036f2005-11-08 21:39:40 -08005518 NULL,
5519};
5520
5521static struct attribute *md_redundancy_attrs[] = {
NeilBrown24dd4692005-11-08 21:39:26 -08005522 &md_scan_mode.attr,
Jonathan Brassowc4a39552013-06-25 01:23:59 -05005523 &md_last_scan_mode.attr,
NeilBrown9d888832005-11-08 21:39:26 -08005524 &md_mismatches.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005525 &md_sync_min.attr,
5526 &md_sync_max.attr,
5527 &md_sync_speed.attr,
Bernd Schubert90b08712008-05-23 13:04:38 -07005528 &md_sync_force_parallel.attr,
NeilBrown88202a02006-01-06 00:21:36 -08005529 &md_sync_completed.attr,
Neil Brown5e96ee62008-06-28 08:31:24 +10005530 &md_min_sync.attr,
NeilBrownc6207272008-02-06 01:39:52 -08005531 &md_max_sync.attr,
NeilBrowne464eaf2006-03-27 01:18:14 -08005532 &md_suspend_lo.attr,
5533 &md_suspend_hi.attr,
Paul Clements9b1d1da2006-10-03 01:15:49 -07005534 &md_bitmap.attr,
Iustin Popd7f3d292007-10-16 23:30:54 -07005535 &md_degraded.attr,
NeilBrowneae17012005-11-08 21:39:23 -08005536 NULL,
5537};
NeilBrown411036f2005-11-08 21:39:40 -08005538static struct attribute_group md_redundancy_group = {
5539 .name = NULL,
5540 .attrs = md_redundancy_attrs,
5541};
5542
NeilBrowneae17012005-11-08 21:39:23 -08005543static ssize_t
5544md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5545{
5546 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005547 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005548 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005549
5550 if (!entry->show)
5551 return -EIO;
NeilBrownaf8a2432011-12-08 15:49:46 +11005552 spin_lock(&all_mddevs_lock);
5553 if (list_empty(&mddev->all_mddevs)) {
5554 spin_unlock(&all_mddevs_lock);
5555 return -EBUSY;
5556 }
5557 mddev_get(mddev);
5558 spin_unlock(&all_mddevs_lock);
5559
NeilBrownb7b17c92014-12-15 12:56:59 +11005560 rv = entry->show(mddev, page);
NeilBrownaf8a2432011-12-08 15:49:46 +11005561 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005562 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005563}
5564
5565static ssize_t
5566md_attr_store(struct kobject *kobj, struct attribute *attr,
5567 const char *page, size_t length)
5568{
5569 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
NeilBrownfd01b882011-10-11 16:47:53 +11005570 struct mddev *mddev = container_of(kobj, struct mddev, kobj);
NeilBrown96de1e62005-11-08 21:39:39 -08005571 ssize_t rv;
NeilBrowneae17012005-11-08 21:39:23 -08005572
5573 if (!entry->store)
5574 return -EIO;
NeilBrown67463ac2006-07-10 04:44:19 -07005575 if (!capable(CAP_SYS_ADMIN))
5576 return -EACCES;
NeilBrownaf8a2432011-12-08 15:49:46 +11005577 spin_lock(&all_mddevs_lock);
5578 if (list_empty(&mddev->all_mddevs)) {
5579 spin_unlock(&all_mddevs_lock);
5580 return -EBUSY;
5581 }
5582 mddev_get(mddev);
5583 spin_unlock(&all_mddevs_lock);
NeilBrown67918752014-12-15 12:57:01 +11005584 rv = entry->store(mddev, page, length);
NeilBrownaf8a2432011-12-08 15:49:46 +11005585 mddev_put(mddev);
NeilBrown96de1e62005-11-08 21:39:39 -08005586 return rv;
NeilBrowneae17012005-11-08 21:39:23 -08005587}
5588
5589static void md_free(struct kobject *ko)
5590{
NeilBrownfd01b882011-10-11 16:47:53 +11005591 struct mddev *mddev = container_of(ko, struct mddev, kobj);
NeilBrowna21d1502009-01-09 08:31:09 +11005592
5593 if (mddev->sysfs_state)
5594 sysfs_put(mddev->sysfs_state);
Junxiao Bie1a86db2020-07-14 16:10:26 -07005595 if (mddev->sysfs_level)
5596 sysfs_put(mddev->sysfs_level);
5597
Bart Van Assched8115c352018-02-28 10:15:29 -08005598 if (mddev->gendisk)
5599 del_gendisk(mddev->gendisk);
NeilBrown6cd18e72015-04-27 14:12:22 +10005600 if (mddev->queue)
5601 blk_cleanup_queue(mddev->queue);
Bart Van Assched8115c352018-02-28 10:15:29 -08005602 if (mddev->gendisk)
NeilBrowna21d1502009-01-09 08:31:09 +11005603 put_disk(mddev->gendisk);
NeilBrown4ad23a972017-03-15 14:05:14 +11005604 percpu_ref_exit(&mddev->writes_pending);
NeilBrowna21d1502009-01-09 08:31:09 +11005605
Kent Overstreet28dec872018-06-07 20:52:54 -04005606 bioset_exit(&mddev->bio_set);
5607 bioset_exit(&mddev->sync_set);
Artur Paszkiewicz41d2d842020-07-03 11:13:09 +02005608 mempool_exit(&mddev->md_io_pool);
NeilBrowneae17012005-11-08 21:39:23 -08005609 kfree(mddev);
5610}
5611
Emese Revfy52cf25d2010-01-19 02:58:23 +01005612static const struct sysfs_ops md_sysfs_ops = {
NeilBrowneae17012005-11-08 21:39:23 -08005613 .show = md_attr_show,
5614 .store = md_attr_store,
5615};
5616static struct kobj_type md_ktype = {
5617 .release = md_free,
5618 .sysfs_ops = &md_sysfs_ops,
5619 .default_attrs = md_default_attrs,
5620};
5621
Linus Torvalds1da177e2005-04-16 15:20:36 -07005622int mdp_major = 0;
5623
Dan Williams5fd3a172009-03-04 00:57:25 -07005624static void mddev_delayed_delete(struct work_struct *ws)
5625{
NeilBrownfd01b882011-10-11 16:47:53 +11005626 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Dan Williams5fd3a172009-03-04 00:57:25 -07005627
NeilBrown43a70502009-12-14 12:49:55 +11005628 sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
Dan Williams5fd3a172009-03-04 00:57:25 -07005629 kobject_del(&mddev->kobj);
5630 kobject_put(&mddev->kobj);
5631}
5632
NeilBrown4ad23a972017-03-15 14:05:14 +11005633static void no_op(struct percpu_ref *r) {}
5634
NeilBrowna415c0f2017-06-05 16:05:13 +10005635int mddev_init_writes_pending(struct mddev *mddev)
5636{
5637 if (mddev->writes_pending.percpu_count_ptr)
5638 return 0;
Roman Gushchinddde2af2019-05-07 10:01:49 -07005639 if (percpu_ref_init(&mddev->writes_pending, no_op,
5640 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
NeilBrowna415c0f2017-06-05 16:05:13 +10005641 return -ENOMEM;
5642 /* We want to start with the refcount at zero */
5643 percpu_ref_put(&mddev->writes_pending);
5644 return 0;
5645}
5646EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5647
NeilBrownefeb53c2009-01-09 08:31:10 +11005648static int md_alloc(dev_t dev, char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005649{
NeilBrown039b7222017-04-12 16:26:13 +10005650 /*
5651 * If dev is zero, name is the name of a device to allocate with
5652 * an arbitrary minor number. It will be "md_???"
5653 * If dev is non-zero it must be a device number with a MAJOR of
5654 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then
5655 * the device is being created by opening a node in /dev.
5656 * If "name" is not NULL, the device is being created by
5657 * writing to /sys/module/md_mod/parameters/new_array.
5658 */
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005659 static DEFINE_MUTEX(disks_mutex);
NeilBrownfd01b882011-10-11 16:47:53 +11005660 struct mddev *mddev = mddev_find(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661 struct gendisk *disk;
NeilBrownefeb53c2009-01-09 08:31:10 +11005662 int partitioned;
5663 int shift;
5664 int unit;
Greg Kroah-Hartman3830c622007-12-17 15:54:39 -04005665 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666
5667 if (!mddev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005668 return -ENODEV;
5669
5670 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5671 shift = partitioned ? MdpMinorShift : 0;
5672 unit = MINOR(mddev->unit) >> shift;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673
Tejun Heoe804ac72010-10-15 15:36:08 +02005674 /* wait for any previous instance of this device to be
5675 * completely removed (mddev_delayed_delete).
NeilBrownd3374822009-01-09 08:31:10 +11005676 */
Tejun Heoe804ac72010-10-15 15:36:08 +02005677 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11005678
Arjan van de Ven48c9c272006-03-27 01:18:20 -08005679 mutex_lock(&disks_mutex);
NeilBrown0909dc42009-07-01 12:27:21 +10005680 error = -EEXIST;
5681 if (mddev->gendisk)
5682 goto abort;
NeilBrownefeb53c2009-01-09 08:31:10 +11005683
NeilBrown039b7222017-04-12 16:26:13 +10005684 if (name && !dev) {
NeilBrownefeb53c2009-01-09 08:31:10 +11005685 /* Need to ensure that 'name' is not a duplicate.
5686 */
NeilBrownfd01b882011-10-11 16:47:53 +11005687 struct mddev *mddev2;
NeilBrownefeb53c2009-01-09 08:31:10 +11005688 spin_lock(&all_mddevs_lock);
5689
5690 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5691 if (mddev2->gendisk &&
5692 strcmp(mddev2->gendisk->disk_name, name) == 0) {
5693 spin_unlock(&all_mddevs_lock);
NeilBrown0909dc42009-07-01 12:27:21 +10005694 goto abort;
NeilBrownefeb53c2009-01-09 08:31:10 +11005695 }
5696 spin_unlock(&all_mddevs_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005697 }
NeilBrown039b7222017-04-12 16:26:13 +10005698 if (name && dev)
5699 /*
5700 * Creating /dev/mdNNN via "newarray", so adjust hold_active.
5701 */
5702 mddev->hold_active = UNTIL_STOP;
NeilBrown8b765392009-01-09 08:31:08 +11005703
Artur Paszkiewicz41d2d842020-07-03 11:13:09 +02005704 error = mempool_init_kmalloc_pool(&mddev->md_io_pool, BIO_POOL_SIZE,
5705 sizeof(struct md_io));
5706 if (error)
5707 goto abort;
5708
NeilBrown0909dc42009-07-01 12:27:21 +10005709 error = -ENOMEM;
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02005710 mddev->queue = blk_alloc_queue(NUMA_NO_NODE);
NeilBrown0909dc42009-07-01 12:27:21 +10005711 if (!mddev->queue)
5712 goto abort;
NeilBrown409c57f2009-03-31 14:39:39 +11005713
Martin K. Petersenb1bd0552012-01-11 16:27:11 +01005714 blk_set_stacking_limits(&mddev->queue->limits);
NeilBrown8b765392009-01-09 08:31:08 +11005715
Linus Torvalds1da177e2005-04-16 15:20:36 -07005716 disk = alloc_disk(1 << shift);
5717 if (!disk) {
NeilBrown8b765392009-01-09 08:31:08 +11005718 blk_cleanup_queue(mddev->queue);
5719 mddev->queue = NULL;
NeilBrown0909dc42009-07-01 12:27:21 +10005720 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005721 }
NeilBrownefeb53c2009-01-09 08:31:10 +11005722 disk->major = MAJOR(mddev->unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005723 disk->first_minor = unit << shift;
NeilBrownefeb53c2009-01-09 08:31:10 +11005724 if (name)
5725 strcpy(disk->disk_name, name);
5726 else if (partitioned)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005727 sprintf(disk->disk_name, "md_d%d", unit);
Greg Kroah-Hartmance7b0f462005-06-20 21:15:16 -07005728 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07005729 sprintf(disk->disk_name, "md%d", unit);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005730 disk->fops = &md_fops;
5731 disk->private_data = mddev;
5732 disk->queue = mddev->queue;
Jens Axboe56883a72016-03-30 10:16:53 -06005733 blk_queue_write_cache(mddev->queue, true, true);
NeilBrown92850bb2008-10-21 13:25:32 +11005734 /* Allow extended partitions. This makes the
NeilBrownd3374822009-01-09 08:31:10 +11005735 * 'mdp' device redundant, but we can't really
NeilBrown92850bb2008-10-21 13:25:32 +11005736 * remove it now.
5737 */
5738 disk->flags |= GENHD_FL_EXT_DEVT;
Christoph Hellwiga564e232020-07-08 14:25:41 +02005739 disk->events |= DISK_EVENT_MEDIA_CHANGE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005740 mddev->gendisk = disk;
NeilBrownb0140892011-05-10 17:49:01 +10005741 /* As soon as we call add_disk(), another thread could get
5742 * through to md_open, so make sure it doesn't get too far
5743 */
5744 mutex_lock(&mddev->open_mutex);
5745 add_disk(disk);
5746
Kent Overstreet28dec872018-06-07 20:52:54 -04005747 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
NeilBrown0909dc42009-07-01 12:27:21 +10005748 if (error) {
5749 /* This isn't possible, but as kobject_init_and_add is marked
5750 * __must_check, we must do something with the result
5751 */
NeilBrown9d487392016-11-02 14:16:49 +11005752 pr_debug("md: cannot register %s/md - name in use\n",
5753 disk->disk_name);
NeilBrown0909dc42009-07-01 12:27:21 +10005754 error = 0;
5755 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005756 if (mddev->kobj.sd &&
5757 sysfs_create_group(&mddev->kobj, &md_bitmap_group))
NeilBrown9d487392016-11-02 14:16:49 +11005758 pr_debug("pointless warning\n");
NeilBrownb0140892011-05-10 17:49:01 +10005759 mutex_unlock(&mddev->open_mutex);
NeilBrown0909dc42009-07-01 12:27:21 +10005760 abort:
5761 mutex_unlock(&disks_mutex);
NeilBrown00bcb4a2010-06-01 19:37:23 +10005762 if (!error && mddev->kobj.sd) {
Greg Kroah-Hartman3830c622007-12-17 15:54:39 -04005763 kobject_uevent(&mddev->kobj, KOBJ_ADD);
NeilBrown00bcb4a2010-06-01 19:37:23 +10005764 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
Junxiao Bie1a86db2020-07-14 16:10:26 -07005765 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
NeilBrownb62b7592008-10-21 13:25:21 +11005766 }
NeilBrownd3374822009-01-09 08:31:10 +11005767 mddev_put(mddev);
NeilBrown0909dc42009-07-01 12:27:21 +10005768 return error;
NeilBrownefeb53c2009-01-09 08:31:10 +11005769}
5770
Christoph Hellwig28144f92020-10-29 15:58:34 +01005771static void md_probe(dev_t dev)
NeilBrownefeb53c2009-01-09 08:31:10 +11005772{
Christoph Hellwig28144f92020-10-29 15:58:34 +01005773 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5774 return;
NeilBrown78b63502017-04-12 16:26:13 +10005775 if (create_on_open)
5776 md_alloc(dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005777}
5778
Kees Cooke4dca7b2017-10-17 19:04:42 -07005779static int add_named_array(const char *val, const struct kernel_param *kp)
NeilBrownefeb53c2009-01-09 08:31:10 +11005780{
NeilBrown039b7222017-04-12 16:26:13 +10005781 /*
5782 * val must be "md_*" or "mdNNN".
5783 * For "md_*" we allocate an array with a large free minor number, and
NeilBrownefeb53c2009-01-09 08:31:10 +11005784 * set the name to val. val must not already be an active name.
NeilBrown039b7222017-04-12 16:26:13 +10005785 * For "mdNNN" we allocate an array with the minor number NNN
5786 * which must not already be in use.
NeilBrownefeb53c2009-01-09 08:31:10 +11005787 */
5788 int len = strlen(val);
5789 char buf[DISK_NAME_LEN];
NeilBrown039b7222017-04-12 16:26:13 +10005790 unsigned long devnum;
NeilBrownefeb53c2009-01-09 08:31:10 +11005791
5792 while (len && val[len-1] == '\n')
5793 len--;
5794 if (len >= DISK_NAME_LEN)
5795 return -E2BIG;
5796 strlcpy(buf, val, len+1);
NeilBrown039b7222017-04-12 16:26:13 +10005797 if (strncmp(buf, "md_", 3) == 0)
5798 return md_alloc(0, buf);
5799 if (strncmp(buf, "md", 2) == 0 &&
5800 isdigit(buf[2]) &&
5801 kstrtoul(buf+2, 10, &devnum) == 0 &&
5802 devnum <= MINORMASK)
5803 return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);
5804
5805 return -EINVAL;
NeilBrownefeb53c2009-01-09 08:31:10 +11005806}
5807
Kees Cook8376d3c2017-10-16 17:01:48 -07005808static void md_safemode_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005809{
Kees Cook8376d3c2017-10-16 17:01:48 -07005810 struct mddev *mddev = from_timer(mddev, t, safemode_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811
NeilBrown4ad23a972017-03-15 14:05:14 +11005812 mddev->safemode = 1;
5813 if (mddev->external)
5814 sysfs_notify_dirent_safe(mddev->sysfs_state);
5815
Linus Torvalds1da177e2005-04-16 15:20:36 -07005816 md_wakeup_thread(mddev->thread);
5817}
5818
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005819static int start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005820
NeilBrownfd01b882011-10-11 16:47:53 +11005821int md_run(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005822{
NeilBrown2604b702006-01-06 00:20:36 -08005823 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11005824 struct md_rdev *rdev;
NeilBrown84fc4b52011-10-11 16:49:58 +11005825 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826
NeilBrowna757e642005-04-16 15:26:42 -07005827 if (list_empty(&mddev->disks))
5828 /* cannot run an array with no devices.. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005830
5831 if (mddev->pers)
5832 return -EBUSY;
NeilBrownbb4f1e92010-08-08 21:18:03 +10005833 /* Cannot run until previous stop completes properly */
5834 if (mddev->sysfs_active)
5835 return -EBUSY;
NeilBrownb6eb1272010-04-15 10:13:47 +10005836
Linus Torvalds1da177e2005-04-16 15:20:36 -07005837 /*
5838 * Analyze all RAID superblock(s)
5839 */
NeilBrown1ec4a932008-02-06 01:39:53 -08005840 if (!mddev->raid_disks) {
5841 if (!mddev->persistent)
5842 return -EINVAL;
Yufen Yu6a5cb532019-10-16 16:00:03 +08005843 err = analyze_sbs(mddev);
5844 if (err)
5845 return -EINVAL;
NeilBrown1ec4a932008-02-06 01:39:53 -08005846 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005847
NeilBrownd9d166c2006-01-06 00:20:51 -08005848 if (mddev->level != LEVEL_NONE)
5849 request_module("md-level-%d", mddev->level);
5850 else if (mddev->clevel[0])
5851 request_module("md-%s", mddev->clevel);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005852
5853 /*
5854 * Drop all container device buffers, from now on
5855 * the only valid external interface is through the md
5856 * device.
Linus Torvalds1da177e2005-04-16 15:20:36 -07005857 */
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005858 mddev->has_superblocks = false;
NeilBrowndafb20f2012-03-19 12:46:39 +11005859 rdev_for_each(rdev, mddev) {
NeilBrownb2d444d2005-11-08 21:39:31 -08005860 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005861 continue;
5862 sync_blockdev(rdev->bdev);
Peter Zijlstraf98393a2007-05-06 14:49:54 -07005863 invalidate_bdev(rdev->bdev);
NeilBrown97b20ef2017-04-13 08:53:48 +10005864 if (mddev->ro != 1 &&
5865 (bdev_read_only(rdev->bdev) ||
5866 bdev_read_only(rdev->meta_bdev))) {
5867 mddev->ro = 1;
5868 if (mddev->gendisk)
5869 set_disk_ro(mddev->gendisk, 1);
5870 }
NeilBrownf0d76d72007-07-17 04:06:12 -07005871
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01005872 if (rdev->sb_page)
5873 mddev->has_superblocks = true;
5874
NeilBrownf0d76d72007-07-17 04:06:12 -07005875 /* perform some consistency tests on the device.
5876	 * We don't want the data to overlap the metadata;
Andre Noll58c0fed2009-03-31 14:33:13 +11005877	 * Internal Bitmap issues have been handled elsewhere.
NeilBrownf0d76d72007-07-17 04:06:12 -07005878 */
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +11005879 if (rdev->meta_bdev) {
5880 /* Nothing to check */;
5881 } else if (rdev->data_offset < rdev->sb_start) {
Andre Noll58c0fed2009-03-31 14:33:13 +11005882 if (mddev->dev_sectors &&
5883 rdev->data_offset + mddev->dev_sectors
Andre Noll0f420352008-07-11 22:02:23 +10005884 > rdev->sb_start) {
NeilBrown9d487392016-11-02 14:16:49 +11005885 pr_warn("md: %s: data overlaps metadata\n",
5886 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005887 return -EINVAL;
5888 }
5889 } else {
Andre Noll0f420352008-07-11 22:02:23 +10005890 if (rdev->sb_start + rdev->sb_size/512
NeilBrownf0d76d72007-07-17 04:06:12 -07005891 > rdev->data_offset) {
NeilBrown9d487392016-11-02 14:16:49 +11005892 pr_warn("md: %s: metadata overlaps data\n",
5893 mdname(mddev));
NeilBrownf0d76d72007-07-17 04:06:12 -07005894 return -EINVAL;
5895 }
5896 }
NeilBrown00bcb4a2010-06-01 19:37:23 +10005897 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005898 }
5899
Kent Overstreetafeee512018-05-20 18:25:52 -04005900 if (!bioset_initialized(&mddev->bio_set)) {
5901 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5902 if (err)
5903 return err;
Ming Lei10273172017-02-14 23:29:00 +08005904 }
Kent Overstreetafeee512018-05-20 18:25:52 -04005905 if (!bioset_initialized(&mddev->sync_set)) {
5906 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5907 if (err)
Kent Overstreet28dec872018-06-07 20:52:54 -04005908 return err;
NeilBrown5a850712017-06-21 09:12:21 +10005909 }
NeilBrowna167f662010-10-26 18:31:13 +11005910
Linus Torvalds1da177e2005-04-16 15:20:36 -07005911 spin_lock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005912 pers = find_pers(mddev->level, mddev->clevel);
NeilBrown2604b702006-01-06 00:20:36 -08005913 if (!pers || !try_module_get(pers->owner)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005914 spin_unlock(&pers_lock);
NeilBrownd9d166c2006-01-06 00:20:51 -08005915 if (mddev->level != LEVEL_NONE)
NeilBrown9d487392016-11-02 14:16:49 +11005916 pr_warn("md: personality for level %d is not loaded!\n",
5917 mddev->level);
NeilBrownd9d166c2006-01-06 00:20:51 -08005918 else
NeilBrown9d487392016-11-02 14:16:49 +11005919 pr_warn("md: personality for level %s is not loaded!\n",
5920 mddev->clevel);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005921 err = -EINVAL;
5922 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005923 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005924 spin_unlock(&pers_lock);
NeilBrown34817e82009-03-31 14:39:38 +11005925 if (mddev->level != pers->level) {
5926 mddev->level = pers->level;
5927 mddev->new_level = pers->level;
5928 }
NeilBrownd9d166c2006-01-06 00:20:51 -08005929 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930
NeilBrownf6705572006-03-27 01:18:11 -08005931 if (mddev->reshape_position != MaxSector &&
NeilBrown63c70c42006-03-27 01:18:13 -08005932 pers->start_reshape == NULL) {
NeilBrownf6705572006-03-27 01:18:11 -08005933 /* This personality cannot handle reshaping... */
NeilBrownf6705572006-03-27 01:18:11 -08005934 module_put(pers->owner);
Shaohua Libfc9dfd2018-06-13 08:39:49 -07005935 err = -EINVAL;
5936 goto abort;
NeilBrownf6705572006-03-27 01:18:11 -08005937 }
5938
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005939 if (pers->sync_request) {
5940 /* Warn if this is a potentially silly
5941 * configuration.
5942 */
5943 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11005944 struct md_rdev *rdev2;
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005945 int warned = 0;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005946
NeilBrowndafb20f2012-03-19 12:46:39 +11005947 rdev_for_each(rdev, mddev)
5948 rdev_for_each(rdev2, mddev) {
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005949 if (rdev < rdev2 &&
Christoph Hellwig61a27e1f2020-09-03 07:40:58 +02005950 rdev->bdev->bd_disk ==
5951 rdev2->bdev->bd_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11005952 pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5953 mdname(mddev),
5954 bdevname(rdev->bdev,b),
5955 bdevname(rdev2->bdev,b2));
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005956 warned = 1;
5957 }
5958 }
Cheng Renquan159ec1f2009-01-09 08:31:08 +11005959
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005960 if (warned)
NeilBrown9d487392016-11-02 14:16:49 +11005961 pr_warn("True protection against single-disk failure might be compromised.\n");
NeilBrown7dd5e7c32007-02-28 20:11:35 -08005962 }
5963
NeilBrown657390d2005-08-26 18:34:16 -07005964 mddev->recovery = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11005965	/* may be overridden by personality */
5966 mddev->resync_max_sectors = mddev->dev_sectors;
5967
NeilBrown6ff8d8ec2006-01-06 00:20:15 -08005968 mddev->ok_start_degraded = start_dirty_degraded;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005969
NeilBrown0f9552b52009-12-30 12:08:50 +11005970 if (start_readonly && mddev->ro == 0)
NeilBrownf91de922005-11-08 21:39:36 -08005971 mddev->ro = 2; /* read-only, but switch on first write */
5972
NeilBrown36d091f2014-12-15 12:56:58 +11005973 err = pers->run(mddev);
Andre Noll13e53df2008-03-26 00:07:03 +01005974 if (err)
NeilBrown9d487392016-11-02 14:16:49 +11005975 pr_warn("md: pers->run() failed ...\n");
NeilBrown36d091f2014-12-15 12:56:58 +11005976 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
NeilBrown9d487392016-11-02 14:16:49 +11005977 WARN_ONCE(!mddev->external_size,
5978 "%s: default size too small, but 'external_size' not in effect?\n",
5979 __func__);
5980 pr_warn("md: invalid array_size %llu > default size %llu\n",
5981 (unsigned long long)mddev->array_sectors / 2,
5982 (unsigned long long)pers->size(mddev, 0, 0) / 2);
Dan Williamsb522adc2009-03-31 15:00:31 +11005983 err = -EINVAL;
Dan Williamsb522adc2009-03-31 15:00:31 +11005984 }
NeilBrown36d091f2014-12-15 12:56:58 +11005985 if (err == 0 && pers->sync_request &&
NeilBrownef99bf42012-05-22 13:55:08 +10005986 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005987 struct bitmap *bitmap;
5988
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07005989 bitmap = md_bitmap_create(mddev, -1);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005990 if (IS_ERR(bitmap)) {
5991 err = PTR_ERR(bitmap);
NeilBrown9d487392016-11-02 14:16:49 +11005992 pr_warn("%s: failed to create bitmap (%d)\n",
5993 mdname(mddev), err);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05005994 } else
5995 mddev->bitmap = bitmap;
5996
NeilBrownb15c2e52006-01-06 00:20:16 -08005997 }
Guoqing Jiangd4945492019-06-14 17:10:39 +08005998 if (err)
5999 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006000
6001 if (mddev->bitmap_info.max_write_behind > 0) {
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006002 bool create_pool = false;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006003
6004 rdev_for_each(rdev, mddev) {
6005 if (test_bit(WriteMostly, &rdev->flags) &&
Guoqing Jiang404659c2019-12-23 10:48:53 +01006006 rdev_init_serial(rdev))
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006007 create_pool = true;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006008 }
Guoqing Jiang3e173ab2019-12-23 10:48:54 +01006009 if (create_pool && mddev->serial_info_pool == NULL) {
Guoqing Jiang404659c2019-12-23 10:48:53 +01006010 mddev->serial_info_pool =
6011 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
6012 sizeof(struct serial_info));
6013 if (!mddev->serial_info_pool) {
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006014 err = -ENOMEM;
Guoqing Jiangd4945492019-06-14 17:10:39 +08006015 goto bitmap_abort;
Guoqing Jiang3e148a32019-06-19 17:30:46 +08006016 }
6017 }
6018 }
6019
NeilBrown5c675f82014-12-15 12:56:56 +11006020 if (mddev->queue) {
Shaohua Libb086a82016-09-30 09:45:40 -07006021 bool nonrot = true;
6022
6023 rdev_for_each(rdev, mddev) {
6024 if (rdev->raid_disk >= 0 &&
6025 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6026 nonrot = false;
6027 break;
6028 }
6029 }
6030 if (mddev->degraded)
6031 nonrot = false;
6032 if (nonrot)
Bart Van Assche8b904b52018-03-07 17:10:10 -08006033 blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
Shaohua Libb086a82016-09-30 09:45:40 -07006034 else
Bart Van Assche8b904b52018-03-07 17:10:10 -08006035 blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
NeilBrown5c675f82014-12-15 12:56:56 +11006036 }
NeilBrown36d091f2014-12-15 12:56:58 +11006037 if (pers->sync_request) {
NeilBrown00bcb4a2010-06-01 19:37:23 +10006038 if (mddev->kobj.sd &&
6039 sysfs_create_group(&mddev->kobj, &md_redundancy_group))
NeilBrown9d487392016-11-02 14:16:49 +11006040 pr_warn("md: cannot register extra attributes for %s\n",
6041 mdname(mddev));
NeilBrown00bcb4a2010-06-01 19:37:23 +10006042 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
Junxiao Bie8efa9b2020-08-04 17:27:18 -07006043 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6044 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
NeilBrown5e55e2f2007-03-26 21:32:14 -08006045 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
NeilBrownfd9d49c2005-11-08 21:39:42 -08006046 mddev->ro = 0;
6047
Robert Becker1e509152009-12-14 12:49:58 +11006048 atomic_set(&mddev->max_corr_read_errors,
6049 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006050 mddev->safemode = 0;
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006051 if (mddev_is_clustered(mddev))
6052 mddev->safemode_delay = 0;
6053 else
Zhao Heming7c9d5c52020-07-21 02:08:52 +08006054 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006055 mddev->in_sync = 1;
NeilBrown0ca69882011-01-14 09:14:33 +11006056 smp_wmb();
NeilBrown36d091f2014-12-15 12:56:58 +11006057 spin_lock(&mddev->lock);
6058 mddev->pers = pers;
NeilBrown36d091f2014-12-15 12:56:58 +11006059 spin_unlock(&mddev->lock);
NeilBrowndafb20f2012-03-19 12:46:39 +11006060 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006061 if (rdev->raid_disk >= 0)
Yufen Yue5b521e2019-06-14 15:41:07 -07006062 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006063
NeilBrowna4a3d262015-07-17 11:57:30 +10006064 if (mddev->degraded && !mddev->ro)
6065 /* This ensures that recovering status is reported immediately
6066 * via sysfs - until a lack of spares is confirmed.
6067 */
6068 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006069 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf72ffdd2014-09-30 14:23:59 +10006070
Shaohua Li29530792016-12-08 15:48:19 -08006071 if (mddev->sb_flags)
NeilBrown850b2b422006-10-03 01:15:46 -07006072 md_update_sb(mddev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006073
NeilBrownd7603b72006-01-06 00:20:30 -08006074 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006075 return 0;
Xiao Nib1261942018-01-24 12:17:38 +08006076
Guoqing Jiangd4945492019-06-14 17:10:39 +08006077bitmap_abort:
6078 mddev_detach(mddev);
6079 if (mddev->private)
6080 pers->free(mddev, mddev->private);
6081 mddev->private = NULL;
6082 module_put(pers->owner);
6083 md_bitmap_destroy(mddev);
Xiao Nib1261942018-01-24 12:17:38 +08006084abort:
NeilBrown4bc034d2019-03-29 10:46:16 -07006085 bioset_exit(&mddev->bio_set);
6086 bioset_exit(&mddev->sync_set);
Xiao Nib1261942018-01-24 12:17:38 +08006087 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006088}
NeilBrown390ee602010-06-01 19:37:27 +10006089EXPORT_SYMBOL_GPL(md_run);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006090
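/*
 * do_md_run() wraps md_run() for arrays driven by md itself: it also
 * loads the bitmap, kicks the array and sync threads, announces the
 * capacity to the block layer, and notifies userspace through sysfs
 * and a KOBJ_CHANGE uevent.  MD_NOT_READY marks the window where this
 * setup is still incomplete.
 */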
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006091int do_md_run(struct mddev *mddev)
NeilBrownfe60b012010-03-29 11:10:42 +11006092{
6093 int err;
6094
NeilBrown9d4b45d2019-08-20 10:21:09 +10006095 set_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006096 err = md_run(mddev);
6097 if (err)
6098 goto out;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006099 err = md_bitmap_load(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006100 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006101 md_bitmap_destroy(mddev);
NeilBrown69e51b42010-06-01 19:37:35 +10006102 goto out;
6103 }
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006104
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006105 if (mddev_is_clustered(mddev))
6106 md_allow_write(mddev);
6107
Song Liud5d885f2017-11-19 22:17:01 -08006108 /* run start up tasks that require md_thread */
6109 md_start(mddev);
6110
Jonathan Brassow0fd018a2011-06-07 17:49:36 -05006111 md_wakeup_thread(mddev->thread);
6112 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6113
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006114 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006115 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006116 mddev->changed = 1;
NeilBrownfe60b012010-03-29 11:10:42 +11006117 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
NeilBrown9d4b45d2019-08-20 10:21:09 +10006118 sysfs_notify_dirent_safe(mddev->sysfs_state);
6119 sysfs_notify_dirent_safe(mddev->sysfs_action);
Junxiao Bie1a86db2020-07-14 16:10:26 -07006120 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownfe60b012010-03-29 11:10:42 +11006121out:
NeilBrown9d4b45d2019-08-20 10:21:09 +10006122 clear_bit(MD_NOT_READY, &mddev->flags);
NeilBrownfe60b012010-03-29 11:10:42 +11006123 return err;
6124}
6125
Song Liud5d885f2017-11-19 22:17:01 -08006126int md_start(struct mddev *mddev)
6127{
6128 int ret = 0;
6129
6130 if (mddev->pers->start) {
6131 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6132 md_wakeup_thread(mddev->thread);
6133 ret = mddev->pers->start(mddev);
6134 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6135 md_wakeup_thread(mddev->sync_thread);
6136 }
6137 return ret;
6138}
6139EXPORT_SYMBOL_GPL(md_start);
6140
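/*
 * restart_array() switches a read-only ("ro") array back to read-write;
 * this is what the RESTART_ARRAY_RW ioctl lands on.  It refuses if a
 * required journal device is missing or faulty, or if any component
 * device is itself read-only.
 */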
NeilBrownfd01b882011-10-11 16:47:53 +11006141static int restart_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006142{
6143 struct gendisk *disk = mddev->gendisk;
NeilBrown97b20ef2017-04-13 08:53:48 +10006144 struct md_rdev *rdev;
6145 bool has_journal = false;
6146 bool has_readonly = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006147
Andre Noll80fab1d2008-07-11 22:02:21 +10006148 /* Complain if it has no devices */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006149 if (list_empty(&mddev->disks))
Andre Noll80fab1d2008-07-11 22:02:21 +10006150 return -ENXIO;
6151 if (!mddev->pers)
6152 return -EINVAL;
6153 if (!mddev->ro)
6154 return -EBUSY;
Song Liu339421d2015-10-08 21:54:13 -07006155
NeilBrown97b20ef2017-04-13 08:53:48 +10006156 rcu_read_lock();
6157 rdev_for_each_rcu(rdev, mddev) {
6158 if (test_bit(Journal, &rdev->flags) &&
6159 !test_bit(Faulty, &rdev->flags))
6160 has_journal = true;
6161 if (bdev_read_only(rdev->bdev))
6162 has_readonly = true;
Song Liu339421d2015-10-08 21:54:13 -07006163 }
NeilBrown97b20ef2017-04-13 08:53:48 +10006164 rcu_read_unlock();
6165 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6166 /* Don't restart rw with journal missing/faulty */
6167 return -EINVAL;
6168 if (has_readonly)
6169 return -EROFS;
Song Liu339421d2015-10-08 21:54:13 -07006170
Andre Noll80fab1d2008-07-11 22:02:21 +10006171 mddev->safemode = 0;
6172 mddev->ro = 0;
6173 set_disk_ro(disk, 0);
NeilBrown9d487392016-11-02 14:16:49 +11006174 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
Andre Noll80fab1d2008-07-11 22:02:21 +10006175 /* Kick recovery or resync if necessary */
6176 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6177 md_wakeup_thread(mddev->thread);
6178 md_wakeup_thread(mddev->sync_thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006179 sysfs_notify_dirent_safe(mddev->sysfs_state);
Andre Noll80fab1d2008-07-11 22:02:21 +10006180 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006181}
6182
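/*
 * md_clean() resets almost every mddev field to its default so that a
 * fully stopped device can be reused for a new array; fields that must
 * survive (the unit number, locks, the queue and gendisk) are
 * deliberately not touched here.
 */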
NeilBrownfd01b882011-10-11 16:47:53 +11006183static void md_clean(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006184{
6185 mddev->array_sectors = 0;
6186 mddev->external_size = 0;
6187 mddev->dev_sectors = 0;
6188 mddev->raid_disks = 0;
6189 mddev->recovery_cp = 0;
6190 mddev->resync_min = 0;
6191 mddev->resync_max = MaxSector;
6192 mddev->reshape_position = MaxSector;
6193 mddev->external = 0;
6194 mddev->persistent = 0;
6195 mddev->level = LEVEL_NONE;
6196 mddev->clevel[0] = 0;
6197 mddev->flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08006198 mddev->sb_flags = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006199 mddev->ro = 0;
6200 mddev->metadata_type[0] = 0;
6201 mddev->chunk_sectors = 0;
6202 mddev->ctime = mddev->utime = 0;
6203 mddev->layout = 0;
6204 mddev->max_disks = 0;
6205 mddev->events = 0;
NeilBrowna8707c02010-05-18 09:28:43 +10006206 mddev->can_decrease_events = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006207 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10006208 mddev->reshape_backwards = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006209 mddev->new_level = LEVEL_NONE;
6210 mddev->new_layout = 0;
6211 mddev->new_chunk_sectors = 0;
6212 mddev->curr_resync = 0;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11006213 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown6177b472010-03-29 11:37:13 +11006214 mddev->suspend_lo = mddev->suspend_hi = 0;
6215 mddev->sync_speed_min = mddev->sync_speed_max = 0;
6216 mddev->recovery = 0;
6217 mddev->in_sync = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006218 mddev->changed = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006219 mddev->degraded = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006220 mddev->safemode = 0;
NeilBrownbd691922015-06-25 17:01:40 +10006221 mddev->private = NULL;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006222 mddev->cluster_info = NULL;
NeilBrown6177b472010-03-29 11:37:13 +11006223 mddev->bitmap_info.offset = 0;
6224 mddev->bitmap_info.default_offset = 0;
NeilBrown6409bb02012-05-22 13:55:07 +10006225 mddev->bitmap_info.default_space = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006226 mddev->bitmap_info.chunksize = 0;
6227 mddev->bitmap_info.daemon_sleep = 0;
6228 mddev->bitmap_info.max_write_behind = 0;
Guoqing Jiangc20c33f2016-08-12 13:42:38 +08006229 mddev->bitmap_info.nodes = 0;
NeilBrown6177b472010-03-29 11:37:13 +11006230}
6231
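/*
 * __md_stop_writes() freezes recovery, reaps any running sync thread,
 * flushes the bitmap and, when nothing is dirty or pending, marks the
 * array clean in the metadata.  The caller must hold the mddev lock;
 * md_stop_writes() below is the locking wrapper exported for dm-raid.
 */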
NeilBrownfd01b882011-10-11 16:47:53 +11006232static void __md_stop_writes(struct mddev *mddev)
NeilBrowna047e122010-03-29 12:07:53 +11006233{
NeilBrown6b6204e2013-05-09 09:48:30 +10006234 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Guoqing Jiang21e09582020-04-04 23:57:07 +02006235 if (work_pending(&mddev->del_work))
6236 flush_workqueue(md_misc_wq);
NeilBrowna047e122010-03-29 12:07:53 +11006237 if (mddev->sync_thread) {
NeilBrowna047e122010-03-29 12:07:53 +11006238 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Jonathan Brassowa91d5ac2013-04-24 11:42:43 +10006239 md_reap_sync_thread(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006240 }
6241
6242 del_timer_sync(&mddev->safemode_timer);
6243
Shaohua Li034e33f2016-11-21 10:29:19 -08006244 if (mddev->pers && mddev->pers->quiesce) {
6245 mddev->pers->quiesce(mddev, 1);
6246 mddev->pers->quiesce(mddev, 0);
6247 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006248 md_bitmap_flush(mddev);
NeilBrowna047e122010-03-29 12:07:53 +11006249
NeilBrownb6d428c2013-04-24 11:42:42 +10006250 if (mddev->ro == 0 &&
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006251 ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
Shaohua Li29530792016-12-08 15:48:19 -08006252 mddev->sb_flags)) {
NeilBrowna047e122010-03-29 12:07:53 +11006253 /* mark array as shutdown cleanly */
Goldwyn Rodrigues28c1b9f2015-10-22 16:01:25 +11006254 if (!mddev_is_clustered(mddev))
6255 mddev->in_sync = 1;
NeilBrowna047e122010-03-29 12:07:53 +11006256 md_update_sb(mddev, 1);
6257 }
Guoqing Jiang69b00b52019-12-23 10:49:00 +01006258 /* disable policy to guarantee rdevs free resources for serialization */
6259 mddev->serialize_policy = 0;
6260 mddev_destroy_serial_pool(mddev, NULL, true);
NeilBrowna047e122010-03-29 12:07:53 +11006261}
NeilBrowndefad612011-01-14 09:14:33 +11006262
NeilBrownfd01b882011-10-11 16:47:53 +11006263void md_stop_writes(struct mddev *mddev)
NeilBrowndefad612011-01-14 09:14:33 +11006264{
NeilBrown29f097c2013-11-14 17:54:51 +11006265 mddev_lock_nointr(mddev);
NeilBrowndefad612011-01-14 09:14:33 +11006266 __md_stop_writes(mddev);
6267 mddev_unlock(mddev);
6268}
NeilBrown390ee602010-06-01 19:37:27 +10006269EXPORT_SYMBOL_GPL(md_stop_writes);
NeilBrowna047e122010-03-29 12:07:53 +11006270
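/*
 * mddev_detach() waits for bitmap write-behind I/O to drain, quiesces
 * the personality and unregisters the main per-array thread.  After it
 * returns the personality submits no further I/O.
 */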
NeilBrown5aa61f42014-12-15 12:56:57 +11006271static void mddev_detach(struct mddev *mddev)
6272{
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006273 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang6b40bec2020-02-11 11:10:04 +01006274 if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
NeilBrown5aa61f42014-12-15 12:56:57 +11006275 mddev->pers->quiesce(mddev, 1);
6276 mddev->pers->quiesce(mddev, 0);
6277 }
6278 md_unregister_thread(&mddev->thread);
6279 if (mddev->queue)
6280 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6281}
6282
NeilBrown5eff3c42012-11-19 10:47:48 +11006283static void __md_stop(struct mddev *mddev)
NeilBrown6177b472010-03-29 11:37:13 +11006284{
NeilBrown36d091f2014-12-15 12:56:58 +11006285 struct md_personality *pers = mddev->pers;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07006286 md_bitmap_destroy(mddev);
NeilBrown5aa61f42014-12-15 12:56:57 +11006287 mddev_detach(mddev);
NeilBrownee5d0042015-07-22 10:20:07 +10006288 /* Ensure ->event_work is done */
Guoqing Jiang21e09582020-04-04 23:57:07 +02006289 if (mddev->event_work.func)
6290 flush_workqueue(md_misc_wq);
NeilBrown36d091f2014-12-15 12:56:58 +11006291 spin_lock(&mddev->lock);
NeilBrown6177b472010-03-29 11:37:13 +11006292 mddev->pers = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006293 spin_unlock(&mddev->lock);
6294 pers->free(mddev, mddev->private);
NeilBrownbd691922015-06-25 17:01:40 +10006295 mddev->private = NULL;
NeilBrown36d091f2014-12-15 12:56:58 +11006296 if (pers->sync_request && mddev->to_remove == NULL)
6297 mddev->to_remove = &md_redundancy_group;
6298 module_put(pers->owner);
NeilBrowncca9cf92010-04-01 12:08:16 +11006299 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
Jack Wang6aaa58c2018-10-19 16:21:31 +02006300}
6301
6302void md_stop(struct mddev *mddev)
6303{
6304	/* stop the array and free any attached data structures.
6305	 * This is called from dm-raid.
6306 */
6307 __md_stop(mddev);
Kent Overstreetafeee512018-05-20 18:25:52 -04006308 bioset_exit(&mddev->bio_set);
6309 bioset_exit(&mddev->sync_set);
NeilBrown5eff3c42012-11-19 10:47:48 +11006310}
6311
NeilBrown390ee602010-06-01 19:37:27 +10006312EXPORT_SYMBOL_GPL(md_stop);
NeilBrown6177b472010-03-29 11:37:13 +11006313
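/*
 * md_set_readonly() backs the STOP_ARRAY_RO ioctl: it interrupts any
 * resync/reshape, stops writes and flips the array to ro=1.  It fails
 * with -EBUSY while the device is held open by anyone beyond the
 * caller ("bdev" identifies the opener issuing the ioctl).
 */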
NeilBrowna05b7ea2012-07-19 15:59:18 +10006314static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
NeilBrowna4bd82d2010-03-29 13:23:10 +11006315{
6316 int err = 0;
NeilBrown30b8feb2013-11-14 15:16:17 +11006317 int did_freeze = 0;
6318
6319 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6320 did_freeze = 1;
6321 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6322 md_wakeup_thread(mddev->thread);
6323 }
NeilBrownf851b602014-12-11 10:02:10 +11006324 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006325 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006326 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006327 /* Thread might be blocked waiting for metadata update
6328 * which will now never happen */
6329 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006330
Shaohua Li29530792016-12-08 15:48:19 -08006331 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
NeilBrown88724bf2015-09-24 14:00:51 +10006332 return -EBUSY;
NeilBrown30b8feb2013-11-14 15:16:17 +11006333 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006334 wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6335 &mddev->recovery));
NeilBrown88724bf2015-09-24 14:00:51 +10006336 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08006337 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown30b8feb2013-11-14 15:16:17 +11006338 mddev_lock_nointr(mddev);
6339
NeilBrowna4bd82d2010-03-29 13:23:10 +11006340 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006341 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006342 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006343 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006344 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown30b8feb2013-11-14 15:16:17 +11006345 if (did_freeze) {
6346 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006347 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006348 md_wakeup_thread(mddev->thread);
6349 }
NeilBrowna4bd82d2010-03-29 13:23:10 +11006350 err = -EBUSY;
6351 goto out;
6352 }
6353 if (mddev->pers) {
NeilBrowndefad612011-01-14 09:14:33 +11006354 __md_stop_writes(mddev);
NeilBrowna4bd82d2010-03-29 13:23:10 +11006355
6356 err = -ENXIO;
6357 if (mddev->ro==1)
6358 goto out;
6359 mddev->ro = 1;
6360 set_disk_ro(mddev->gendisk, 1);
6361 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006362 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6363 md_wakeup_thread(mddev->thread);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006364 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown30b8feb2013-11-14 15:16:17 +11006365 err = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006366 }
6367out:
6368 mutex_unlock(&mddev->open_mutex);
6369 return err;
6370}
6371
NeilBrown9e653b62006-06-26 00:27:58 -07006372/* mode:
6373 * 0 - completely stop and disassemble array
NeilBrown9e653b62006-06-26 00:27:58 -07006374 * 2 - stop but do not disassemble array
6375 */
NeilBrownf72ffdd2014-09-30 14:23:59 +10006376static int do_md_stop(struct mddev *mddev, int mode,
NeilBrowna05b7ea2012-07-19 15:59:18 +10006377 struct block_device *bdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006378{
Linus Torvalds1da177e2005-04-16 15:20:36 -07006379 struct gendisk *disk = mddev->gendisk;
NeilBrown3cb03002011-10-11 16:45:26 +11006380 struct md_rdev *rdev;
NeilBrown30b8feb2013-11-14 15:16:17 +11006381 int did_freeze = 0;
6382
6383 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6384 did_freeze = 1;
6385 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6386 md_wakeup_thread(mddev->thread);
6387 }
NeilBrownf851b602014-12-11 10:02:10 +11006388 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
NeilBrown30b8feb2013-11-14 15:16:17 +11006389 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11006390 if (mddev->sync_thread)
NeilBrown30b8feb2013-11-14 15:16:17 +11006391 /* Thread might be blocked waiting for metadata update
6392 * which will now never happen */
6393 wake_up_process(mddev->sync_thread->tsk);
NeilBrownf851b602014-12-11 10:02:10 +11006394
NeilBrown30b8feb2013-11-14 15:16:17 +11006395 mddev_unlock(mddev);
NeilBrownf851b602014-12-11 10:02:10 +11006396 wait_event(resync_wait, (mddev->sync_thread == NULL &&
6397 !test_bit(MD_RECOVERY_RUNNING,
6398 &mddev->recovery)));
NeilBrown30b8feb2013-11-14 15:16:17 +11006399 mddev_lock_nointr(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006400
NeilBrownc8c00a62009-08-10 12:50:52 +10006401 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10006402 if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
NeilBrown30b8feb2013-11-14 15:16:17 +11006403 mddev->sysfs_active ||
6404 mddev->sync_thread ||
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08006405 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
NeilBrown9d487392016-11-02 14:16:49 +11006406 pr_warn("md: %s still in use.\n",mdname(mddev));
NeilBrown6e17b022010-08-07 21:41:19 +10006407 mutex_unlock(&mddev->open_mutex);
NeilBrown30b8feb2013-11-14 15:16:17 +11006408 if (did_freeze) {
6409 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
NeilBrown45eaf452014-10-29 08:49:50 +11006410 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrown30b8feb2013-11-14 15:16:17 +11006411 md_wakeup_thread(mddev->thread);
6412 }
NeilBrown260fa032013-08-27 16:44:13 +10006413 return -EBUSY;
6414 }
NeilBrown6e17b022010-08-07 21:41:19 +10006415 if (mddev->pers) {
NeilBrowna4bd82d2010-03-29 13:23:10 +11006416 if (mddev->ro)
6417 set_disk_ro(disk, 0);
NeilBrown409c57f2009-03-31 14:39:39 +11006418
NeilBrowndefad612011-01-14 09:14:33 +11006419 __md_stop_writes(mddev);
NeilBrown5eff3c42012-11-19 10:47:48 +11006420 __md_stop(mddev);
NeilBrown6177b472010-03-29 11:37:13 +11006421
NeilBrowna4bd82d2010-03-29 13:23:10 +11006422 /* tell userspace to handle 'inactive' */
NeilBrown00bcb4a2010-06-01 19:37:23 +10006423 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown0d4ca602006-12-10 02:20:44 -08006424
NeilBrowndafb20f2012-03-19 12:46:39 +11006425 rdev_for_each(rdev, mddev)
Namhyung Kim36fad852011-07-27 11:00:36 +10006426 if (rdev->raid_disk >= 0)
6427 sysfs_unlink_rdev(mddev, rdev);
NeilBrownc4647292009-05-07 12:51:06 +10006428
Christoph Hellwig2c247c52020-11-16 15:57:11 +01006429 set_capacity_and_notify(disk, 0);
NeilBrown6e17b022010-08-07 21:41:19 +10006430 mutex_unlock(&mddev->open_mutex);
NeilBrownf0b4f7e2011-02-24 17:26:41 +11006431 mddev->changed = 1;
NeilBrown0d4ca602006-12-10 02:20:44 -08006432
NeilBrowna4bd82d2010-03-29 13:23:10 +11006433 if (mddev->ro)
6434 mddev->ro = 0;
NeilBrown6e17b022010-08-07 21:41:19 +10006435 } else
6436 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006437 /*
6438 * Free resources if final stop
6439 */
NeilBrown9e653b62006-06-26 00:27:58 -07006440 if (mode == 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006441 pr_info("md: %s stopped.\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006442
NeilBrownc3d97142009-12-14 12:49:52 +11006443 if (mddev->bitmap_info.file) {
NeilBrown4af1a042014-12-15 12:57:00 +11006444 struct file *f = mddev->bitmap_info.file;
6445 spin_lock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11006446 mddev->bitmap_info.file = NULL;
NeilBrown4af1a042014-12-15 12:57:00 +11006447 spin_unlock(&mddev->lock);
6448 fput(f);
NeilBrown978f9462006-02-02 14:28:05 -08006449 }
NeilBrownc3d97142009-12-14 12:49:52 +11006450 mddev->bitmap_info.offset = 0;
NeilBrown978f9462006-02-02 14:28:05 -08006451
Linus Torvalds1da177e2005-04-16 15:20:36 -07006452 export_array(mddev);
6453
NeilBrown6177b472010-03-29 11:37:13 +11006454 md_clean(mddev);
NeilBrownefeb53c2009-01-09 08:31:10 +11006455 if (mddev->hold_active == UNTIL_STOP)
6456 mddev->hold_active = 0;
NeilBrowna4bd82d2010-03-29 13:23:10 +11006457 }
NeilBrownd7603b72006-01-06 00:20:30 -08006458 md_new_event(mddev);
NeilBrown00bcb4a2010-06-01 19:37:23 +10006459 sysfs_notify_dirent_safe(mddev->sysfs_state);
NeilBrown6e17b022010-08-07 21:41:19 +10006460 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006461}
6462
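/*
 * Illustrative userspace sketch (assumed, not part of this file): the
 * mode-0 path of do_md_stop() above is what services
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	ioctl(fd, STOP_ARRAY, NULL);
 *
 * with STOP_ARRAY from <linux/raid/md_u.h>; "mdadm --stop" takes the
 * same route.
 */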
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006463#ifndef MODULE
NeilBrownfd01b882011-10-11 16:47:53 +11006464static void autorun_array(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465{
NeilBrown3cb03002011-10-11 16:45:26 +11006466 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006467 int err;
6468
NeilBrowna757e642005-04-16 15:26:42 -07006469 if (list_empty(&mddev->disks))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006470 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006471
NeilBrown9d487392016-11-02 14:16:49 +11006472 pr_info("md: running: ");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006473
NeilBrowndafb20f2012-03-19 12:46:39 +11006474 rdev_for_each(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006475 char b[BDEVNAME_SIZE];
NeilBrown9d487392016-11-02 14:16:49 +11006476 pr_cont("<%s>", bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006477 }
NeilBrown9d487392016-11-02 14:16:49 +11006478 pr_cont("\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006479
NeilBrownd710e132008-10-13 11:55:12 +11006480 err = do_md_run(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006481 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11006482 pr_warn("md: do_md_run() returned %d\n", err);
NeilBrowna05b7ea2012-07-19 15:59:18 +10006483 do_md_stop(mddev, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006484 }
6485}
6486
6487/*
6488 * let's try to run arrays based on all disks that have arrived
6489 * until now. (those are in pending_raid_disks)
6490 *
6491 * the method: pick the first pending disk, collect all disks with
6492 * the same UUID, remove all from the pending list and put them into
6493 * the 'same_array' list. Then order this list based on superblock
6494 * update time (freshest comes first), kick out 'old' disks and
6495 * compare superblocks. If everything's fine then run it.
6496 *
6497 * If "unit" is allocated, then bump its reference count
6498 */
6499static void autorun_devices(int part)
6500{
NeilBrown3cb03002011-10-11 16:45:26 +11006501 struct md_rdev *rdev0, *rdev, *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11006502 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006503 char b[BDEVNAME_SIZE];
6504
NeilBrown9d487392016-11-02 14:16:49 +11006505 pr_info("md: autorun ...\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506 while (!list_empty(&pending_raid_disks)) {
NeilBrowne8703fe2006-10-03 01:15:59 -07006507 int unit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508 dev_t dev;
NeilBrownad01c9e2006-03-27 01:18:07 -08006509 LIST_HEAD(candidates);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510 rdev0 = list_entry(pending_raid_disks.next,
NeilBrown3cb03002011-10-11 16:45:26 +11006511 struct md_rdev, same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006512
NeilBrown9d487392016-11-02 14:16:49 +11006513 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006514 INIT_LIST_HEAD(&candidates);
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006515 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006516 if (super_90_load(rdev, rdev0, 0) >= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006517 pr_debug("md: adding %s ...\n",
6518 bdevname(rdev->bdev,b));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006519 list_move(&rdev->same_set, &candidates);
6520 }
6521 /*
6522 * now we have a set of devices, with all of them having
6523 * mostly sane superblocks. It's time to allocate the
6524 * mddev.
6525 */
NeilBrowne8703fe2006-10-03 01:15:59 -07006526 if (part) {
6527 dev = MKDEV(mdp_major,
6528 rdev0->preferred_minor << MdpMinorShift);
6529 unit = MINOR(dev) >> MdpMinorShift;
6530 } else {
6531 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6532 unit = MINOR(dev);
6533 }
6534 if (rdev0->preferred_minor != unit) {
NeilBrown9d487392016-11-02 14:16:49 +11006535 pr_warn("md: unit number in %s is bad: %d\n",
6536 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006537 break;
6538 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006539
Christoph Hellwig28144f92020-10-29 15:58:34 +01006540 md_probe(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006541 mddev = mddev_find(dev);
Neil Brown9bbbca32008-06-28 08:31:17 +10006542 if (!mddev || !mddev->gendisk) {
6543 if (mddev)
6544 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006545 break;
6546 }
NeilBrownf72ffdd2014-09-30 14:23:59 +10006547 if (mddev_lock(mddev))
NeilBrown9d487392016-11-02 14:16:49 +11006548 pr_warn("md: %s locked, cannot run\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006549 else if (mddev->raid_disks || mddev->major_version
6550 || !list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11006551 pr_warn("md: %s already running, cannot run %s\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006552 mdname(mddev), bdevname(rdev0->bdev,b));
6553 mddev_unlock(mddev);
6554 } else {
NeilBrown9d487392016-11-02 14:16:49 +11006555 pr_debug("md: created %s\n", mdname(mddev));
NeilBrown1ec4a932008-02-06 01:39:53 -08006556 mddev->persistent = 1;
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006557 rdev_for_each_list(rdev, tmp, &candidates) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006558 list_del_init(&rdev->same_set);
6559 if (bind_rdev_to_array(rdev, mddev))
6560 export_rdev(rdev);
6561 }
6562 autorun_array(mddev);
6563 mddev_unlock(mddev);
6564 }
6565		/* on success, candidates will be empty; on error
6566		 * it won't be...
6567 */
Cheng Renquan159ec1f2009-01-09 08:31:08 +11006568 rdev_for_each_list(rdev, tmp, &candidates) {
NeilBrown4b809912008-07-21 17:05:25 +10006569 list_del_init(&rdev->same_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570 export_rdev(rdev);
NeilBrown4b809912008-07-21 17:05:25 +10006571 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006572 mddev_put(mddev);
6573 }
NeilBrown9d487392016-11-02 14:16:49 +11006574 pr_info("md: ... autorun DONE.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07006575}
Jeff Garzikfdee8ae2006-12-10 02:20:50 -08006576#endif /* !MODULE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006577
NeilBrownf72ffdd2014-09-30 14:23:59 +10006578static int get_version(void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006579{
6580 mdu_version_t ver;
6581
6582 ver.major = MD_MAJOR_VERSION;
6583 ver.minor = MD_MINOR_VERSION;
6584 ver.patchlevel = MD_PATCHLEVEL_VERSION;
6585
6586 if (copy_to_user(arg, &ver, sizeof(ver)))
6587 return -EFAULT;
6588
6589 return 0;
6590}
6591
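/*
 * Illustrative userspace sketch (assumed, not part of this file): the
 * handler above services GET_VERSION, roughly
 *
 *	mdu_version_t ver;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (ioctl(fd, GET_VERSION, &ver) == 0)
 *		printf("md %d.%d.%d\n", ver.major, ver.minor, ver.patchlevel);
 *
 * with GET_VERSION and mdu_version_t from <linux/raid/md_u.h>.
 */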
NeilBrownf72ffdd2014-09-30 14:23:59 +10006592static int get_array_info(struct mddev *mddev, void __user *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593{
6594 mdu_array_info_t info;
NeilBrowna9f326e2009-09-23 18:06:41 +10006595	int nr, working, insync, failed, spare;
NeilBrown3cb03002011-10-11 16:45:26 +11006596 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597
NeilBrown1ca69c42012-10-11 13:37:33 +11006598 nr = working = insync = failed = spare = 0;
6599 rcu_read_lock();
6600 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601 nr++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006602 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006603 failed++;
6604 else {
6605 working++;
NeilBrownb2d444d2005-11-08 21:39:31 -08006606 if (test_bit(In_sync, &rdev->flags))
NeilBrownf72ffdd2014-09-30 14:23:59 +10006607 insync++;
Song Liub347af82016-08-11 17:14:45 -07006608 else if (test_bit(Journal, &rdev->flags))
6609 /* TODO: add journal count to md_u.h */
6610 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006611 else
6612 spare++;
6613 }
6614 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006615 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006616
6617 info.major_version = mddev->major_version;
6618 info.minor_version = mddev->minor_version;
6619 info.patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006620 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006621 info.level = mddev->level;
Andre Noll58c0fed2009-03-31 14:33:13 +11006622 info.size = mddev->dev_sectors / 2;
6623 if (info.size != mddev->dev_sectors / 2) /* overflow */
NeilBrown284ae7c2006-02-03 03:03:40 -08006624 info.size = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006625 info.nr_disks = nr;
6626 info.raid_disks = mddev->raid_disks;
6627 info.md_minor = mddev->md_minor;
6628 info.not_persistent= !mddev->persistent;
6629
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11006630 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006631 info.state = 0;
6632 if (mddev->in_sync)
6633 info.state = (1<<MD_SB_CLEAN);
NeilBrownc3d97142009-12-14 12:49:52 +11006634 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown9bd35922014-07-02 11:35:06 +10006635 info.state |= (1<<MD_SB_BITMAP_PRESENT);
Goldwyn Rodriguesca8895d2014-11-26 12:22:03 -06006636 if (mddev_is_clustered(mddev))
6637 info.state |= (1<<MD_SB_CLUSTERED);
NeilBrowna9f326e2009-09-23 18:06:41 +10006638 info.active_disks = insync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006639 info.working_disks = working;
6640 info.failed_disks = failed;
6641 info.spare_disks = spare;
6642
6643 info.layout = mddev->layout;
Andre Noll9d8f0362009-06-18 08:45:01 +10006644 info.chunk_size = mddev->chunk_sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006645
6646 if (copy_to_user(arg, &info, sizeof(info)))
6647 return -EFAULT;
6648
6649 return 0;
6650}
6651
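/*
 * Note: info.size above is reported in KiB (sectors/2) and saturates
 * to -1 on overflow, and ctime/utime are clamped to 32 bits;
 * mdu_array_info_t is a fixed legacy ABI and cannot be widened.
 */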
NeilBrownf72ffdd2014-09-30 14:23:59 +10006652static int get_bitmap_file(struct mddev *mddev, void __user * arg)
NeilBrown32a76272005-06-21 17:17:14 -07006653{
6654 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
NeilBrownf4ad3d32014-12-15 12:57:00 +11006655 char *ptr;
NeilBrown4af1a042014-12-15 12:57:00 +11006656 int err;
NeilBrown32a76272005-06-21 17:17:14 -07006657
Benjamin Randazzob6878d92015-07-25 16:36:50 +02006658 file = kzalloc(sizeof(*file), GFP_NOIO);
NeilBrown32a76272005-06-21 17:17:14 -07006659 if (!file)
NeilBrown4af1a042014-12-15 12:57:00 +11006660 return -ENOMEM;
NeilBrown32a76272005-06-21 17:17:14 -07006661
NeilBrown32a76272005-06-21 17:17:14 -07006662 err = 0;
NeilBrown4af1a042014-12-15 12:57:00 +11006663 spin_lock(&mddev->lock);
Benjamin Randazzo25eafe12015-07-25 16:36:50 +02006664 /* bitmap enabled */
6665 if (mddev->bitmap_info.file) {
6666 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6667 sizeof(file->pathname));
6668 if (IS_ERR(ptr))
6669 err = PTR_ERR(ptr);
6670 else
6671 memmove(file->pathname, ptr,
6672 sizeof(file->pathname)-(ptr-file->pathname));
6673 }
NeilBrown4af1a042014-12-15 12:57:00 +11006674 spin_unlock(&mddev->lock);
6675
6676 if (err == 0 &&
6677 copy_to_user(arg, file, sizeof(*file)))
NeilBrown32a76272005-06-21 17:17:14 -07006678 err = -EFAULT;
NeilBrown4af1a042014-12-15 12:57:00 +11006679
NeilBrown32a76272005-06-21 17:17:14 -07006680 kfree(file);
6681 return err;
6682}
6683
NeilBrownf72ffdd2014-09-30 14:23:59 +10006684static int get_disk_info(struct mddev *mddev, void __user * arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006685{
6686 mdu_disk_info_t info;
NeilBrown3cb03002011-10-11 16:45:26 +11006687 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006688
6689 if (copy_from_user(&info, arg, sizeof(info)))
6690 return -EFAULT;
6691
NeilBrown1ca69c42012-10-11 13:37:33 +11006692 rcu_read_lock();
Goldwyn Rodrigues57d051d2015-04-14 10:43:55 -05006693 rdev = md_find_rdev_nr_rcu(mddev, info.number);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006694 if (rdev) {
6695 info.major = MAJOR(rdev->bdev->bd_dev);
6696 info.minor = MINOR(rdev->bdev->bd_dev);
6697 info.raid_disk = rdev->raid_disk;
6698 info.state = 0;
NeilBrownb2d444d2005-11-08 21:39:31 -08006699 if (test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006700 info.state |= (1<<MD_DISK_FAULTY);
NeilBrownb2d444d2005-11-08 21:39:31 -08006701 else if (test_bit(In_sync, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006702 info.state |= (1<<MD_DISK_ACTIVE);
6703 info.state |= (1<<MD_DISK_SYNC);
6704 }
Shaohua Li9efdca12015-10-12 16:59:50 -07006705 if (test_bit(Journal, &rdev->flags))
Song Liubac624f2015-08-13 14:31:55 -07006706 info.state |= (1<<MD_DISK_JOURNAL);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006707 if (test_bit(WriteMostly, &rdev->flags))
6708 info.state |= (1<<MD_DISK_WRITEMOSTLY);
NeilBrown688834e2016-11-18 16:16:11 +11006709 if (test_bit(FailFast, &rdev->flags))
6710 info.state |= (1<<MD_DISK_FAILFAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006711 } else {
6712 info.major = info.minor = 0;
6713 info.raid_disk = -1;
6714 info.state = (1<<MD_DISK_REMOVED);
6715 }
NeilBrown1ca69c42012-10-11 13:37:33 +11006716 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07006717
6718 if (copy_to_user(arg, &info, sizeof(info)))
6719 return -EFAULT;
6720
6721 return 0;
6722}
6723
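/*
 * Querying a number that holds no device is not an error: the handler
 * above reports (major, minor) = (0, 0) with MD_DISK_REMOVED set,
 * which lets userspace walk sparse rdev numbers with GET_DISK_INFO.
 */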
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006724int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006725{
6726 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006727 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006728 dev_t dev = MKDEV(info->major,info->minor);
6729
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006730 if (mddev_is_clustered(mddev) &&
6731 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
NeilBrown9d487392016-11-02 14:16:49 +11006732 pr_warn("%s: Cannot add to clustered mddev.\n",
6733 mdname(mddev));
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006734 return -EINVAL;
6735 }
6736
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6738 return -EOVERFLOW;
6739
6740 if (!mddev->raid_disks) {
6741 int err;
6742 /* expecting a device which has a superblock */
6743 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6744 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006745 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006746 PTR_ERR(rdev));
6747 return PTR_ERR(rdev);
6748 }
6749 if (!list_empty(&mddev->disks)) {
NeilBrown3cb03002011-10-11 16:45:26 +11006750 struct md_rdev *rdev0
6751 = list_entry(mddev->disks.next,
6752 struct md_rdev, same_set);
NeilBrowna9f326e2009-09-23 18:06:41 +10006753 err = super_types[mddev->major_version]
Linus Torvalds1da177e2005-04-16 15:20:36 -07006754 .load_super(rdev, rdev0, mddev->minor_version);
6755 if (err < 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006756 pr_warn("md: %s has different UUID to %s\n",
NeilBrownf72ffdd2014-09-30 14:23:59 +10006757 bdevname(rdev->bdev,b),
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758 bdevname(rdev0->bdev,b2));
6759 export_rdev(rdev);
6760 return -EINVAL;
6761 }
6762 }
6763 err = bind_rdev_to_array(rdev, mddev);
6764 if (err)
6765 export_rdev(rdev);
6766 return err;
6767 }
6768
6769 /*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006770 * md_add_new_disk can be used once the array is assembled
Linus Torvalds1da177e2005-04-16 15:20:36 -07006771 * to add "hot spares". They must already have a superblock
6772 * written
6773 */
6774 if (mddev->pers) {
6775 int err;
6776 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006777 pr_warn("%s: personality does not support diskops!\n",
6778 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006779 return -EINVAL;
6780 }
NeilBrown7b1e35f2005-09-09 16:23:50 -07006781 if (mddev->persistent)
6782 rdev = md_import_device(dev, mddev->major_version,
6783 mddev->minor_version);
6784 else
6785 rdev = md_import_device(dev, -1, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006786 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006787 pr_warn("md: md_import_device returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006788 PTR_ERR(rdev));
6789 return PTR_ERR(rdev);
6790 }
NeilBrown1a855a02010-12-09 16:36:28 +11006791 /* set saved_raid_disk if appropriate */
NeilBrown41158c72005-06-21 17:17:25 -07006792 if (!mddev->persistent) {
6793 if (info->state & (1<<MD_DISK_SYNC) &&
NeilBrownbf572542011-01-12 09:03:35 +11006794 info->raid_disk < mddev->raid_disks) {
NeilBrown41158c72005-06-21 17:17:25 -07006795 rdev->raid_disk = info->raid_disk;
NeilBrownbf572542011-01-12 09:03:35 +11006796 set_bit(In_sync, &rdev->flags);
NeilBrown8313b8e2013-12-12 10:13:33 +11006797 clear_bit(Bitmap_sync, &rdev->flags);
NeilBrownbf572542011-01-12 09:03:35 +11006798 } else
NeilBrown41158c72005-06-21 17:17:25 -07006799 rdev->raid_disk = -1;
NeilBrownf4667222013-12-09 12:04:56 +11006800 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrown41158c72005-06-21 17:17:25 -07006801 } else
6802 super_types[mddev->major_version].
6803 validate_super(mddev, rdev);
NeilBrownbedd86b2011-05-11 14:26:20 +10006804 if ((info->state & (1<<MD_DISK_SYNC)) &&
NeilBrownf4563092012-07-03 15:59:06 +10006805 rdev->raid_disk != info->raid_disk) {
NeilBrownbedd86b2011-05-11 14:26:20 +10006806			/* This was a hot-add request, but the event
6807			 * counts don't match, so reject it.
6808 */
6809 export_rdev(rdev);
6810 return -EINVAL;
6811 }
6812
NeilBrownb2d444d2005-11-08 21:39:31 -08006813 clear_bit(In_sync, &rdev->flags); /* just to be sure */
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006814 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6815 set_bit(WriteMostly, &rdev->flags);
NeilBrown575a80f2009-03-31 14:33:13 +11006816 else
6817 clear_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006818 if (info->state & (1<<MD_DISK_FAILFAST))
6819 set_bit(FailFast, &rdev->flags);
6820 else
6821 clear_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006822
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006823 if (info->state & (1<<MD_DISK_JOURNAL)) {
6824 struct md_rdev *rdev2;
6825 bool has_journal = false;
6826
6827 /* make sure no existing journal disk */
6828 rdev_for_each(rdev2, mddev) {
6829 if (test_bit(Journal, &rdev2->flags)) {
6830 has_journal = true;
6831 break;
6832 }
6833 }
NeilBrown230b55f2017-10-17 14:24:09 +11006834 if (has_journal || mddev->bitmap) {
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006835 export_rdev(rdev);
6836 return -EBUSY;
6837 }
Song Liubac624f2015-08-13 14:31:55 -07006838 set_bit(Journal, &rdev->flags);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11006839 }
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006840 /*
6841 * check whether the device shows up in other nodes
6842 */
6843 if (mddev_is_clustered(mddev)) {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006844 if (info->state & (1 << MD_DISK_CANDIDATE))
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006845 set_bit(Candidate, &rdev->flags);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006846 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006847 /* --add initiated by this node */
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006848 err = md_cluster_ops->add_new_disk(mddev, rdev);
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006849 if (err) {
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05006850 export_rdev(rdev);
6851 return err;
6852 }
6853 }
6854 }
6855
Linus Torvalds1da177e2005-04-16 15:20:36 -07006856 rdev->raid_disk = -1;
6857 err = bind_rdev_to_array(rdev, mddev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006858
Linus Torvalds1da177e2005-04-16 15:20:36 -07006859 if (err)
6860 export_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006861
6862 if (mddev_is_clustered(mddev)) {
Guoqing Jiange566aef2016-08-12 13:42:34 +08006863 if (info->state & (1 << MD_DISK_CANDIDATE)) {
6864 if (!err) {
6865 err = md_cluster_ops->new_disk_ack(mddev,
6866 err == 0);
6867 if (err)
6868 md_kick_rdev_from_array(rdev);
6869 }
6870 } else {
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006871 if (err)
6872 md_cluster_ops->add_new_disk_cancel(mddev);
6873 else
6874 err = add_bound_rdev(rdev);
6875 }
6876
6877 } else if (!err)
Goldwyn Rodriguesa6da4ef2015-04-14 10:45:22 -05006878 err = add_bound_rdev(rdev);
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05006879
Linus Torvalds1da177e2005-04-16 15:20:36 -07006880 return err;
6881 }
6882
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02006883 /* otherwise, md_add_new_disk is only allowed
Linus Torvalds1da177e2005-04-16 15:20:36 -07006884 * for major_version==0 superblocks
6885 */
6886 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006887 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006888 return -EINVAL;
6889 }
6890
6891 if (!(info->state & (1<<MD_DISK_FAULTY))) {
6892 int err;
NeilBrownd710e132008-10-13 11:55:12 +11006893 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006894 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006895 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006896 PTR_ERR(rdev));
6897 return PTR_ERR(rdev);
6898 }
6899 rdev->desc_nr = info->number;
6900 if (info->raid_disk < mddev->raid_disks)
6901 rdev->raid_disk = info->raid_disk;
6902 else
6903 rdev->raid_disk = -1;
6904
Linus Torvalds1da177e2005-04-16 15:20:36 -07006905 if (rdev->raid_disk < mddev->raid_disks)
NeilBrownb2d444d2005-11-08 21:39:31 -08006906 if (info->state & (1<<MD_DISK_SYNC))
6907 set_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006908
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006909 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6910 set_bit(WriteMostly, &rdev->flags);
NeilBrown688834e2016-11-18 16:16:11 +11006911 if (info->state & (1<<MD_DISK_FAILFAST))
6912 set_bit(FailFast, &rdev->flags);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07006913
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914 if (!mddev->persistent) {
NeilBrown9d487392016-11-02 14:16:49 +11006915 pr_debug("md: nonpersistent superblock ...\n");
Mike Snitzer77304d22010-11-08 14:39:12 +01006916 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6917 } else
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11006918 rdev->sb_start = calc_dev_sboffset(rdev);
NeilBrown8190e752009-06-18 08:48:58 +10006919 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006920
NeilBrown2bf071b2006-01-06 00:20:55 -08006921 err = bind_rdev_to_array(rdev, mddev);
6922 if (err) {
6923 export_rdev(rdev);
6924 return err;
6925 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006926 }
6927
6928 return 0;
6929}
6930
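/*
 * Illustrative userspace sketch (assumed, not part of this file):
 * hot-adding a spare to a running array goes through ADD_NEW_DISK,
 * roughly
 *
 *	mdu_disk_info_t info = { 0 };
 *	struct stat st;
 *
 *	stat("/dev/sdc1", &st);
 *	info.major = major(st.st_rdev);
 *	info.minor = minor(st.st_rdev);
 *	ioctl(md_fd, ADD_NEW_DISK, &info);
 *
 * with ADD_NEW_DISK and mdu_disk_info_t from <linux/raid/md_u.h> and
 * major()/minor() from <sys/sysmacros.h>.
 */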
NeilBrownf72ffdd2014-09-30 14:23:59 +10006931static int hot_remove_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006932{
6933 char b[BDEVNAME_SIZE];
NeilBrown3cb03002011-10-11 16:45:26 +11006934 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006935
Yufen Yuc42a0e22018-05-04 18:08:10 +08006936 if (!mddev->pers)
6937 return -ENODEV;
6938
Linus Torvalds1da177e2005-04-16 15:20:36 -07006939 rdev = find_rdev(mddev, dev);
6940 if (!rdev)
6941 return -ENXIO;
6942
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006943 if (rdev->raid_disk < 0)
6944 goto kick_rdev;
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05006945
NeilBrown3ea8929d2013-04-24 11:42:41 +10006946 clear_bit(Blocked, &rdev->flags);
6947 remove_and_add_spares(mddev, rdev);
6948
Linus Torvalds1da177e2005-04-16 15:20:36 -07006949 if (rdev->raid_disk >= 0)
6950 goto busy;
6951
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05006952kick_rdev:
Zhao Hemingbca5b062020-11-19 19:41:34 +08006953 if (mddev_is_clustered(mddev)) {
6954 if (md_cluster_ops->remove_disk(mddev, rdev))
6955 goto busy;
6956 }
Goldwyn Rodrigues88bcfef2015-04-14 10:44:44 -05006957
Goldwyn Rodriguesfb56dfe2015-04-14 10:43:24 -05006958 md_kick_rdev_from_array(rdev);
Shaohua Li29530792016-12-08 15:48:19 -08006959 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11006960 if (mddev->thread)
6961 md_wakeup_thread(mddev->thread);
6962 else
6963 md_update_sb(mddev, 1);
NeilBrownd7603b72006-01-06 00:20:30 -08006964 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006965
6966 return 0;
6967busy:
NeilBrown9d487392016-11-02 14:16:49 +11006968 pr_debug("md: cannot remove active disk %s from %s ...\n",
6969 bdevname(rdev->bdev,b), mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006970 return -EBUSY;
6971}
6972
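/*
 * Removal via hot_remove_disk() above is a two-step affair for
 * userspace: the disk must first be failed (SET_DISK_FAULTY or a real
 * error) so that remove_and_add_spares() can detach it from its slot;
 * otherwise the "busy" path is taken.
 */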
NeilBrownf72ffdd2014-09-30 14:23:59 +10006973static int hot_add_disk(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006974{
6975 char b[BDEVNAME_SIZE];
6976 int err;
NeilBrown3cb03002011-10-11 16:45:26 +11006977 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006978
6979 if (!mddev->pers)
6980 return -ENODEV;
6981
6982 if (mddev->major_version != 0) {
NeilBrown9d487392016-11-02 14:16:49 +11006983 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006984 mdname(mddev));
6985 return -EINVAL;
6986 }
6987 if (!mddev->pers->hot_add_disk) {
NeilBrown9d487392016-11-02 14:16:49 +11006988 pr_warn("%s: personality does not support diskops!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006989 mdname(mddev));
6990 return -EINVAL;
6991 }
6992
NeilBrownd710e132008-10-13 11:55:12 +11006993 rdev = md_import_device(dev, -1, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006994 if (IS_ERR(rdev)) {
NeilBrown9d487392016-11-02 14:16:49 +11006995 pr_warn("md: error, md_import_device() returned %ld\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07006996 PTR_ERR(rdev));
6997 return -EINVAL;
6998 }
6999
7000 if (mddev->persistent)
Jonathan Brassow57b2caa2011-01-14 09:14:33 +11007001 rdev->sb_start = calc_dev_sboffset(rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007002 else
Mike Snitzer77304d22010-11-08 14:39:12 +01007003 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007004
NeilBrown8190e752009-06-18 08:48:58 +10007005 rdev->sectors = rdev->sb_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007006
NeilBrownb2d444d2005-11-08 21:39:31 -08007007 if (test_bit(Faulty, &rdev->flags)) {
NeilBrown9d487392016-11-02 14:16:49 +11007008 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007009 bdevname(rdev->bdev,b), mdname(mddev));
7010 err = -EINVAL;
7011 goto abort_export;
7012 }
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007013
NeilBrownb2d444d2005-11-08 21:39:31 -08007014 clear_bit(In_sync, &rdev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007015 rdev->desc_nr = -1;
NeilBrown58427302006-10-06 00:44:04 -07007016 rdev->saved_raid_disk = -1;
NeilBrown2bf071b2006-01-06 00:20:55 -08007017 err = bind_rdev_to_array(rdev, mddev);
7018 if (err)
Goldwyn Rodrigues2aa82192015-09-28 19:21:35 -05007019 goto abort_export;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007020
7021 /*
7022	 * The rest had better be atomic; disk failures can be
7023	 * noticed in interrupt context ...
7024 */
7025
Linus Torvalds1da177e2005-04-16 15:20:36 -07007026 rdev->raid_disk = -1;
7027
Shaohua Li29530792016-12-08 15:48:19 -08007028 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown060b0682016-11-04 16:46:03 +11007029 if (!mddev->thread)
7030 md_update_sb(mddev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007031 /*
7032 * Kick recovery, maybe this spare has to be added to the
7033 * array immediately.
7034 */
7035 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7036 md_wakeup_thread(mddev->thread);
NeilBrownd7603b72006-01-06 00:20:30 -08007037 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007038 return 0;
7039
Linus Torvalds1da177e2005-04-16 15:20:36 -07007040abort_export:
7041 export_rdev(rdev);
7042 return err;
7043}
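/*
 * Illustrative userspace sketch (not part of md.c; the device path and
 * numbers are assumptions): both HOT_ADD_DISK and HOT_REMOVE_DISK take the
 * component device's number as the ioctl argument, roughly:
 *
 *	int fd = open("/dev/md0", O_RDWR);
 *	ioctl(fd, HOT_ADD_DISK, makedev(8, 16));	// hot-add /dev/sdb
 *	ioctl(fd, HOT_REMOVE_DISK, makedev(8, 16));	// -EBUSY while still active
 *
 * As hot_add_disk() above enforces, HOT_ADD_DISK is only accepted for
 * version-0.90 superblock arrays; tools such as mdadm typically use
 * ADD_NEW_DISK instead.
 */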
7044
NeilBrownfd01b882011-10-11 16:47:53 +11007045static int set_bitmap_file(struct mddev *mddev, int fd)
NeilBrown32a76272005-06-21 17:17:14 -07007046{
NeilBrown035328c2014-04-09 12:25:40 +10007047 int err = 0;
NeilBrown32a76272005-06-21 17:17:14 -07007048
NeilBrown36fa3062005-09-09 16:23:45 -07007049 if (mddev->pers) {
NeilBrownd66b1b32014-08-08 15:40:24 +10007050 if (!mddev->pers->quiesce || !mddev->thread)
NeilBrown36fa3062005-09-09 16:23:45 -07007051 return -EBUSY;
7052 if (mddev->recovery || mddev->sync_thread)
7053 return -EBUSY;
7054 /* we should be able to change the bitmap.. */
NeilBrown32a76272005-06-21 17:17:14 -07007055 }
7056
NeilBrown36fa3062005-09-09 16:23:45 -07007057 if (fd >= 0) {
NeilBrown035328c2014-04-09 12:25:40 +10007058 struct inode *inode;
NeilBrown1e594bb2014-12-15 12:57:00 +11007059 struct file *f;
NeilBrown36fa3062005-09-09 16:23:45 -07007060
NeilBrown1e594bb2014-12-15 12:57:00 +11007061 if (mddev->bitmap || mddev->bitmap_info.file)
7062 return -EEXIST; /* cannot add when bitmap is present */
7063 f = fget(fd);
7064
7065 if (f == NULL) {
NeilBrown9d487392016-11-02 14:16:49 +11007066 pr_warn("%s: error: failed to get bitmap file\n",
7067 mdname(mddev));
NeilBrown36fa3062005-09-09 16:23:45 -07007068 return -EBADF;
7069 }
7070
NeilBrown1e594bb2014-12-15 12:57:00 +11007071 inode = f->f_mapping->host;
NeilBrown035328c2014-04-09 12:25:40 +10007072 if (!S_ISREG(inode->i_mode)) {
NeilBrown9d487392016-11-02 14:16:49 +11007073 pr_warn("%s: error: bitmap file must be a regular file\n",
7074 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007075 err = -EBADF;
NeilBrown1e594bb2014-12-15 12:57:00 +11007076 } else if (!(f->f_mode & FMODE_WRITE)) {
NeilBrown9d487392016-11-02 14:16:49 +11007077 pr_warn("%s: error: bitmap file must open for write\n",
7078 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007079 err = -EBADF;
7080 } else if (atomic_read(&inode->i_writecount) != 1) {
NeilBrown9d487392016-11-02 14:16:49 +11007081 pr_warn("%s: error: bitmap file is already in use\n",
7082 mdname(mddev));
NeilBrown035328c2014-04-09 12:25:40 +10007083 err = -EBUSY;
7084 }
7085 if (err) {
NeilBrown1e594bb2014-12-15 12:57:00 +11007086 fput(f);
NeilBrown36fa3062005-09-09 16:23:45 -07007087 return err;
7088 }
NeilBrown1e594bb2014-12-15 12:57:00 +11007089 mddev->bitmap_info.file = f;
NeilBrownc3d97142009-12-14 12:49:52 +11007090 mddev->bitmap_info.offset = 0; /* file overrides offset */
NeilBrown36fa3062005-09-09 16:23:45 -07007091 } else if (mddev->bitmap == NULL)
7092 return -ENOENT; /* cannot remove what isn't there */
7093 err = 0;
7094 if (mddev->pers) {
NeilBrown69e51b42010-06-01 19:37:35 +10007095 if (fd >= 0) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007096 struct bitmap *bitmap;
7097
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007098 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007099 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007100 if (!IS_ERR(bitmap)) {
7101 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007102 err = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007103 } else
7104 err = PTR_ERR(bitmap);
NeilBrown52a0d492017-10-17 13:46:43 +11007105 if (err) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007106 md_bitmap_destroy(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007107 fd = -1;
7108 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007109 mddev_resume(mddev);
NeilBrown52a0d492017-10-17 13:46:43 +11007110 } else if (fd < 0) {
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007111 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007112 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007113 mddev_resume(mddev);
NeilBrownd7375ab2006-06-26 00:27:43 -07007114 }
NeilBrownd7375ab2006-06-26 00:27:43 -07007115 }
7116 if (fd < 0) {
NeilBrown4af1a042014-12-15 12:57:00 +11007117 struct file *f = mddev->bitmap_info.file;
7118 if (f) {
7119 spin_lock(&mddev->lock);
7120 mddev->bitmap_info.file = NULL;
7121 spin_unlock(&mddev->lock);
7122 fput(f);
7123 }
NeilBrown36fa3062005-09-09 16:23:45 -07007124 }
7125
NeilBrown32a76272005-06-21 17:17:14 -07007126 return err;
7127}
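/*
 * Illustrative sketch (not part of md.c; the path is an assumption): the
 * SET_BITMAP_FILE ioctl carries a plain int, so callers pass either an open
 * file descriptor to attach an external bitmap or a negative value to drop
 * it, roughly:
 *
 *	int bfd = open("/var/lib/md0-bitmap", O_RDWR);	// regular file, sole writer
 *	ioctl(md_fd, SET_BITMAP_FILE, bfd);		// attach external bitmap
 *	...
 *	ioctl(md_fd, SET_BITMAP_FILE, -1);		// remove the bitmap again
 */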
7128
Linus Torvalds1da177e2005-04-16 15:20:36 -07007129/*
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007130 * md_set_array_info is used two different ways
Linus Torvalds1da177e2005-04-16 15:20:36 -07007131 * The original usage is when creating a new array.
7132 * In this usage, raid_disks is > 0 and it together with
7133 * level, size, not_persistent, layout, chunksize determine the
7134 * shape of the array.
7135 * This will always create an array with a type-0.90.0 superblock.
7136 * The newer usage is when assembling an array.
7137 * In this case raid_disks will be 0, and the major_version field is
7138 * used to determine which style super-blocks are to be found on the devices.
7139 * The minor and patch _version numbers are also kept in case the
7140 * super_block handler wishes to interpret them.
7141 */
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007142int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007143{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007144 if (info->raid_disks == 0) {
7145 /* just setting version number for superblock loading */
7146 if (info->major_version < 0 ||
Ahmed S. Darwish50511da2007-05-09 02:35:34 -07007147 info->major_version >= ARRAY_SIZE(super_types) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07007148 super_types[info->major_version].name == NULL) {
7149 /* maybe try to auto-load a module? */
NeilBrown9d487392016-11-02 14:16:49 +11007150 pr_warn("md: superblock version %d not known\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07007151 info->major_version);
7152 return -EINVAL;
7153 }
7154 mddev->major_version = info->major_version;
7155 mddev->minor_version = info->minor_version;
7156 mddev->patch_version = info->patch_version;
NeilBrown3f9d7b02006-12-22 01:11:41 -08007157 mddev->persistent = !info->not_persistent;
NeilBrowncbd19982009-12-30 12:08:49 +11007158 /* ensure mddev_put doesn't delete this now that there
7159 * is some minimal configuration.
7160 */
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007161 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007162 return 0;
7163 }
7164 mddev->major_version = MD_MAJOR_VERSION;
7165 mddev->minor_version = MD_MINOR_VERSION;
7166 mddev->patch_version = MD_PATCHLEVEL_VERSION;
Deepa Dinamani9ebc6ef2015-12-21 10:51:01 +11007167 mddev->ctime = ktime_get_real_seconds();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007168
7169 mddev->level = info->level;
NeilBrown17115e02006-01-16 22:14:57 -08007170 mddev->clevel[0] = 0;
Andre Noll58c0fed2009-03-31 14:33:13 +11007171 mddev->dev_sectors = 2 * (sector_t)info->size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007172 mddev->raid_disks = info->raid_disks;
7173 /* don't set md_minor, it is determined by which /dev/md* was
7174 * opened
7175 */
7176 if (info->state & (1<<MD_SB_CLEAN))
7177 mddev->recovery_cp = MaxSector;
7178 else
7179 mddev->recovery_cp = 0;
7180 mddev->persistent = ! info->not_persistent;
NeilBrowne6910632008-02-06 01:39:51 -08007181 mddev->external = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007182
7183 mddev->layout = info->layout;
NeilBrown33f2c352019-09-09 16:52:29 +10007184 if (mddev->level == 0)
7185 /* Cannot trust RAID0 layout info here */
7186 mddev->layout = -1;
Andre Noll9d8f0362009-06-18 08:45:01 +10007187 mddev->chunk_sectors = info->chunk_size >> 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007188
Shaohua Li29530792016-12-08 15:48:19 -08007189 if (mddev->persistent) {
NeilBrown1b3bae42017-03-01 07:31:28 +11007190 mddev->max_disks = MD_SB_DISKS;
7191 mddev->flags = 0;
7192 mddev->sb_flags = 0;
Shaohua Li29530792016-12-08 15:48:19 -08007193 }
7194 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195
NeilBrownc3d97142009-12-14 12:49:52 +11007196 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
NeilBrown6409bb02012-05-22 13:55:07 +10007197 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
NeilBrownc3d97142009-12-14 12:49:52 +11007198 mddev->bitmap_info.offset = 0;
NeilBrownb2a27032005-11-28 13:44:12 -08007199
NeilBrownf6705572006-03-27 01:18:11 -08007200 mddev->reshape_position = MaxSector;
7201
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202 /*
7203 * Generate a 128 bit UUID
7204 */
7205 get_random_bytes(mddev->uuid, 16);
7206
NeilBrownf6705572006-03-27 01:18:11 -08007207 mddev->new_level = mddev->level;
Andre Noll664e7c42009-06-18 08:45:27 +10007208 mddev->new_chunk_sectors = mddev->chunk_sectors;
NeilBrownf6705572006-03-27 01:18:11 -08007209 mddev->new_layout = mddev->layout;
7210 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007211 mddev->reshape_backwards = 0;
NeilBrownf6705572006-03-27 01:18:11 -08007212
Linus Torvalds1da177e2005-04-16 15:20:36 -07007213 return 0;
7214}
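/*
 * Illustrative sketch (not part of md.c; field values are assumptions) of
 * the two SET_ARRAY_INFO usages described above:
 *
 *	mdu_array_info_t info = { 0 };
 *
 *	// Either: assembling - leave raid_disks at 0 and only pick the
 *	// superblock version to look for on the member devices.
 *	info.major_version = 1;
 *
 *	// Or: creating - raid_disks > 0 plus level/size/layout/chunk_size
 *	// describe the new array; this always yields a 0.90.0 superblock.
 *	// info.raid_disks = 2;
 *	// info.level = 1;		// RAID1
 *	// info.size = 1048576;		// per-device size in KiB (1 GiB)
 *
 *	ioctl(md_fd, SET_ARRAY_INFO, &info);
 */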
7215
NeilBrownfd01b882011-10-11 16:47:53 +11007216void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
Dan Williams1f403622009-03-31 14:59:03 +11007217{
Shaohua Liefa4b772017-10-18 22:08:13 -07007218 lockdep_assert_held(&mddev->reconfig_mutex);
Dan Williamsb522adc2009-03-31 15:00:31 +11007219
7220 if (mddev->external_size)
7221 return;
7222
Dan Williams1f403622009-03-31 14:59:03 +11007223 mddev->array_sectors = array_sectors;
7224}
7225EXPORT_SYMBOL(md_set_array_sectors);
7226
NeilBrownfd01b882011-10-11 16:47:53 +11007227static int update_size(struct mddev *mddev, sector_t num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007228{
NeilBrown3cb03002011-10-11 16:45:26 +11007229 struct md_rdev *rdev;
NeilBrowna35b0d62006-01-06 00:20:49 -08007230 int rv;
Andre Nolld71f9f82008-07-11 22:02:22 +10007231 int fit = (num_sectors == 0);
Guoqing Jiang818da592017-03-01 16:42:40 +08007232 sector_t old_dev_sectors = mddev->dev_sectors;
Guoqing Jiangab5a98b2016-05-02 11:33:13 -04007233
NeilBrowna35b0d62006-01-06 00:20:49 -08007234 if (mddev->pers->resize == NULL)
7235 return -EINVAL;
Andre Nolld71f9f82008-07-11 22:02:22 +10007236 /* The "num_sectors" is the number of sectors of each device that
7237 * is used. This can only make sense for arrays with redundancy.
7238 * linear and raid0 always use whatever space is available. We can only
7239 * consider changing this number if no resync or reconstruction is
7240 * happening, and if the new size is acceptable. It must fit before the
Andre Noll0f420352008-07-11 22:02:23 +10007241 * sb_start or, if that is <data_offset, it must fit before the size
Andre Nolld71f9f82008-07-11 22:02:22 +10007242 * of each device. If num_sectors is zero, we find the largest size
7243 * that fits.
NeilBrowna35b0d62006-01-06 00:20:49 -08007244 */
NeilBrownf851b602014-12-11 10:02:10 +11007245 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7246 mddev->sync_thread)
NeilBrowna35b0d62006-01-06 00:20:49 -08007247 return -EBUSY;
NeilBrownbd8839e2014-05-28 13:39:21 +10007248 if (mddev->ro)
7249 return -EROFS;
NeilBrowna4a61252012-05-22 13:55:27 +10007250
NeilBrowndafb20f2012-03-19 12:46:39 +11007251 rdev_for_each(rdev, mddev) {
Andre Nolldd8ac332009-03-31 14:33:13 +11007252 sector_t avail = rdev->sectors;
NeilBrown01ab5662006-10-28 10:38:30 -07007253
Andre Nolld71f9f82008-07-11 22:02:22 +10007254 if (fit && (num_sectors == 0 || num_sectors > avail))
7255 num_sectors = avail;
7256 if (avail < num_sectors)
NeilBrowna35b0d62006-01-06 00:20:49 -08007257 return -ENOSPC;
7258 }
Andre Nolld71f9f82008-07-11 22:02:22 +10007259 rv = mddev->pers->resize(mddev, num_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007260 if (!rv) {
Guoqing Jiang818da592017-03-01 16:42:40 +08007261 if (mddev_is_clustered(mddev))
7262 md_cluster_ops->update_size(mddev, old_dev_sectors);
7263 else if (mddev->queue) {
Christoph Hellwig2c247c52020-11-16 15:57:11 +01007264 set_capacity_and_notify(mddev->gendisk,
7265 mddev->array_sectors);
Guoqing Jiangc9483632017-02-24 11:15:23 +08007266 }
7267 }
NeilBrowna35b0d62006-01-06 00:20:49 -08007268 return rv;
7269}
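/*
 * Worked example of the sizing rule above (numbers are made up): with two
 * members offering 1200000 and 1000000 usable sectors, num_sectors == 0
 * ("fit") settles on the smaller value, so ->resize() is called with
 * 1000000.  An explicit request of 1100000 would instead fail with
 * -ENOSPC when the loop reaches the 1000000-sector member.
 */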
7270
NeilBrownfd01b882011-10-11 16:47:53 +11007271static int update_raid_disks(struct mddev *mddev, int raid_disks)
NeilBrownda943b992006-01-06 00:20:54 -08007272{
7273 int rv;
NeilBrownc6563a82012-05-21 09:27:00 +10007274 struct md_rdev *rdev;
NeilBrownda943b992006-01-06 00:20:54 -08007275 /* change the number of raid disks */
NeilBrown63c70c42006-03-27 01:18:13 -08007276 if (mddev->pers->check_reshape == NULL)
NeilBrownda943b992006-01-06 00:20:54 -08007277 return -EINVAL;
NeilBrownbd8839e2014-05-28 13:39:21 +10007278 if (mddev->ro)
7279 return -EROFS;
NeilBrownda943b992006-01-06 00:20:54 -08007280 if (raid_disks <= 0 ||
NeilBrown233fca32010-04-14 17:02:09 +10007281 (mddev->max_disks && raid_disks >= mddev->max_disks))
NeilBrownda943b992006-01-06 00:20:54 -08007282 return -EINVAL;
NeilBrownf851b602014-12-11 10:02:10 +11007283 if (mddev->sync_thread ||
7284 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
Zhao Heminga8da01f2020-11-19 19:41:33 +08007285 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
NeilBrownf851b602014-12-11 10:02:10 +11007286 mddev->reshape_position != MaxSector)
NeilBrownda943b992006-01-06 00:20:54 -08007287 return -EBUSY;
NeilBrownc6563a82012-05-21 09:27:00 +10007288
7289 rdev_for_each(rdev, mddev) {
7290 if (mddev->raid_disks < raid_disks &&
7291 rdev->data_offset < rdev->new_data_offset)
7292 return -EINVAL;
7293 if (mddev->raid_disks > raid_disks &&
7294 rdev->data_offset > rdev->new_data_offset)
7295 return -EINVAL;
7296 }
7297
NeilBrown63c70c42006-03-27 01:18:13 -08007298 mddev->delta_disks = raid_disks - mddev->raid_disks;
NeilBrown2c810cd2012-05-21 09:27:00 +10007299 if (mddev->delta_disks < 0)
7300 mddev->reshape_backwards = 1;
7301 else if (mddev->delta_disks > 0)
7302 mddev->reshape_backwards = 0;
NeilBrown63c70c42006-03-27 01:18:13 -08007303
7304 rv = mddev->pers->check_reshape(mddev);
NeilBrown2c810cd2012-05-21 09:27:00 +10007305 if (rv < 0) {
NeilBrownde171cb2011-01-31 11:57:42 +11007306 mddev->delta_disks = 0;
NeilBrown2c810cd2012-05-21 09:27:00 +10007307 mddev->reshape_backwards = 0;
7308 }
NeilBrownda943b992006-01-06 00:20:54 -08007309 return rv;
7310}
7311
Linus Torvalds1da177e2005-04-16 15:20:36 -07007312/*
7313 * update_array_info is used to change the configuration of an
7314 * on-line array.
7315 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
7316 * fields in the info are checked against the array.
7317 * Any differences that cannot be handled will cause an error.
7318 * Normally, only one change can be managed at a time.
7319 */
NeilBrownfd01b882011-10-11 16:47:53 +11007320static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007321{
7322 int rv = 0;
7323 int cnt = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007324 int state = 0;
7325
7326 /* calculate expected state, ignoring low bits */
NeilBrownc3d97142009-12-14 12:49:52 +11007327 if (mddev->bitmap && mddev->bitmap_info.offset)
NeilBrown36fa3062005-09-09 16:23:45 -07007328 state |= (1 << MD_SB_BITMAP_PRESENT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007329
7330 if (mddev->major_version != info->major_version ||
7331 mddev->minor_version != info->minor_version ||
7332/* mddev->patch_version != info->patch_version || */
7333 mddev->ctime != info->ctime ||
7334 mddev->level != info->level ||
7335/* mddev->layout != info->layout || */
Firo Yang4e023612015-06-11 09:41:10 +08007336 mddev->persistent != !info->not_persistent ||
Andre Noll9d8f0362009-06-18 08:45:01 +10007337 mddev->chunk_sectors != info->chunk_size >> 9 ||
NeilBrown36fa3062005-09-09 16:23:45 -07007338 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7339 ((state^info->state) & 0xfffffe00)
7340 )
Linus Torvalds1da177e2005-04-16 15:20:36 -07007341 return -EINVAL;
7342 /* Check there is only one change */
Andre Noll58c0fed2009-03-31 14:33:13 +11007343 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7344 cnt++;
7345 if (mddev->raid_disks != info->raid_disks)
7346 cnt++;
7347 if (mddev->layout != info->layout)
7348 cnt++;
7349 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7350 cnt++;
7351 if (cnt == 0)
7352 return 0;
7353 if (cnt > 1)
7354 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007355
7356 if (mddev->layout != info->layout) {
7357 /* Change layout
7358 * we don't need to do anything at the md level, the
7359 * personality will take care of it all.
7360 */
NeilBrown50ac1682009-06-18 08:47:55 +10007361 if (mddev->pers->check_reshape == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007362 return -EINVAL;
NeilBrown597a7112009-06-18 08:47:42 +10007363 else {
7364 mddev->new_layout = info->layout;
NeilBrown50ac1682009-06-18 08:47:55 +10007365 rv = mddev->pers->check_reshape(mddev);
NeilBrown597a7112009-06-18 08:47:42 +10007366 if (rv)
7367 mddev->new_layout = mddev->layout;
7368 return rv;
7369 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007370 }
Andre Noll58c0fed2009-03-31 14:33:13 +11007371 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
Andre Nolld71f9f82008-07-11 22:02:22 +10007372 rv = update_size(mddev, (sector_t)info->size * 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373
NeilBrownda943b992006-01-06 00:20:54 -08007374 if (mddev->raid_disks != info->raid_disks)
7375 rv = update_raid_disks(mddev, info->raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376
NeilBrown36fa3062005-09-09 16:23:45 -07007377 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007378 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7379 rv = -EINVAL;
7380 goto err;
7381 }
7382 if (mddev->recovery || mddev->sync_thread) {
7383 rv = -EBUSY;
7384 goto err;
7385 }
NeilBrown36fa3062005-09-09 16:23:45 -07007386 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007387 struct bitmap *bitmap;
NeilBrown36fa3062005-09-09 16:23:45 -07007388 /* add the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007389 if (mddev->bitmap) {
7390 rv = -EEXIST;
7391 goto err;
7392 }
7393 if (mddev->bitmap_info.default_offset == 0) {
7394 rv = -EINVAL;
7395 goto err;
7396 }
NeilBrownc3d97142009-12-14 12:49:52 +11007397 mddev->bitmap_info.offset =
7398 mddev->bitmap_info.default_offset;
NeilBrown6409bb02012-05-22 13:55:07 +10007399 mddev->bitmap_info.space =
7400 mddev->bitmap_info.default_space;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007401 bitmap = md_bitmap_create(mddev, -1);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007402 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05007403 if (!IS_ERR(bitmap)) {
7404 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007405 rv = md_bitmap_load(mddev);
NeilBrownba599ac2015-02-25 11:44:11 +11007406 } else
7407 rv = PTR_ERR(bitmap);
NeilBrown36fa3062005-09-09 16:23:45 -07007408 if (rv)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007409 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007410 mddev_resume(mddev);
NeilBrown36fa3062005-09-09 16:23:45 -07007411 } else {
7412 /* remove the bitmap */
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007413 if (!mddev->bitmap) {
7414 rv = -ENOENT;
7415 goto err;
7416 }
7417 if (mddev->bitmap->storage.file) {
7418 rv = -EINVAL;
7419 goto err;
7420 }
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007421 if (mddev->bitmap_info.nodes) {
7422 /* hold PW on all the bitmap locks */
7423 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
NeilBrown9d487392016-11-02 14:16:49 +11007424 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007425 rv = -EPERM;
7426 md_cluster_ops->unlock_all_bitmaps(mddev);
7427 goto err;
7428 }
7429
7430 mddev->bitmap_info.nodes = 0;
7431 md_cluster_ops->leave(mddev);
Zhao Hemingedee9df2020-07-21 02:08:53 +08007432 module_put(md_cluster_mod);
Zhao Heming7c9d5c52020-07-21 02:08:52 +08007433 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
Guoqing Jiangf6a2dc62015-12-21 10:51:00 +11007434 }
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007435 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07007436 md_bitmap_destroy(mddev);
NeilBrown9e1cc0a2017-10-17 13:46:43 +11007437 mddev_resume(mddev);
NeilBrownc3d97142009-12-14 12:49:52 +11007438 mddev->bitmap_info.offset = 0;
NeilBrown36fa3062005-09-09 16:23:45 -07007439 }
7440 }
NeilBrown850b2b422006-10-03 01:15:46 -07007441 md_update_sb(mddev, 1);
Goldwyn Rodrigues293467a2014-06-07 01:44:51 -05007442 return rv;
7443err:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007444 return rv;
7445}
7446
NeilBrownfd01b882011-10-11 16:47:53 +11007447static int set_disk_faulty(struct mddev *mddev, dev_t dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007448{
NeilBrown3cb03002011-10-11 16:45:26 +11007449 struct md_rdev *rdev;
NeilBrown1ca69c42012-10-11 13:37:33 +11007450 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007451
7452 if (mddev->pers == NULL)
7453 return -ENODEV;
7454
NeilBrown1ca69c42012-10-11 13:37:33 +11007455 rcu_read_lock();
Tomasz Majchrzak1532d9e2017-12-27 10:31:40 +01007456 rdev = md_find_rdev_rcu(mddev, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457 if (!rdev)
NeilBrown1ca69c42012-10-11 13:37:33 +11007458 err = -ENODEV;
7459 else {
7460 md_error(mddev, rdev);
7461 if (!test_bit(Faulty, &rdev->flags))
7462 err = -EBUSY;
7463 }
7464 rcu_read_unlock();
7465 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007466}
7467
Andre Noll2f9618c2008-04-25 18:57:58 +02007468/*
7469 * We have a problem here: there is no easy way to give a CHS
7470 * virtual geometry. We currently pretend that we have a 2 heads
7471 * 4 sectors (with a BIG number of cylinders...). This drives
7472 * dosfs just mad... ;-)
7473 */
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007474static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7475{
NeilBrownfd01b882011-10-11 16:47:53 +11007476 struct mddev *mddev = bdev->bd_disk->private_data;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007477
7478 geo->heads = 2;
7479 geo->sectors = 4;
NeilBrown49ce6ce2010-03-29 10:51:42 +11007480 geo->cylinders = mddev->array_sectors / 8;
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007481 return 0;
7482}
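/*
 * Worked example of the fake geometry above: a 1 TiB array is 2147483648
 * sectors, so it reports heads=2, sectors=4 and
 * cylinders = 2147483648 / 8 = 268435456 - the "BIG number of cylinders"
 * the comment warns about.
 */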
7483
Nicolas Schichancb335f82014-01-15 16:58:52 +01007484static inline bool md_ioctl_valid(unsigned int cmd)
7485{
7486 switch (cmd) {
7487 case ADD_NEW_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007488 case GET_ARRAY_INFO:
7489 case GET_BITMAP_FILE:
7490 case GET_DISK_INFO:
7491 case HOT_ADD_DISK:
7492 case HOT_REMOVE_DISK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007493 case RAID_VERSION:
7494 case RESTART_ARRAY_RW:
7495 case RUN_ARRAY:
7496 case SET_ARRAY_INFO:
7497 case SET_BITMAP_FILE:
7498 case SET_DISK_FAULTY:
7499 case STOP_ARRAY:
7500 case STOP_ARRAY_RO:
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007501 case CLUSTERED_DISK_NACK:
Nicolas Schichancb335f82014-01-15 16:58:52 +01007502 return true;
7503 default:
7504 return false;
7505 }
7506}
7507
Al Viroa39907f2008-03-02 10:31:15 -05007508static int md_ioctl(struct block_device *bdev, fmode_t mode,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007509 unsigned int cmd, unsigned long arg)
7510{
7511 int err = 0;
7512 void __user *argp = (void __user *)arg;
NeilBrownfd01b882011-10-11 16:47:53 +11007513 struct mddev *mddev = NULL;
NeilBrown065e5192017-04-06 11:16:33 +08007514 bool did_set_md_closing = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007515
Nicolas Schichancb335f82014-01-15 16:58:52 +01007516 if (!md_ioctl_valid(cmd))
7517 return -ENOTTY;
7518
NeilBrown506c9e42011-12-23 10:17:26 +11007519 switch (cmd) {
7520 case RAID_VERSION:
7521 case GET_ARRAY_INFO:
7522 case GET_DISK_INFO:
7523 break;
7524 default:
7525 if (!capable(CAP_SYS_ADMIN))
7526 return -EACCES;
7527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007528
7529 /*
7530 * Commands dealing with the RAID driver but not any
7531 * particular array:
7532 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007533 switch (cmd) {
7534 case RAID_VERSION:
7535 err = get_version(argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007536 goto out;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007537 default:;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538 }
7539
7540 /*
7541 * Commands creating/starting a new array:
7542 */
7543
Al Viroa39907f2008-03-02 10:31:15 -05007544 mddev = bdev->bd_disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545
7546 if (!mddev) {
7547 BUG();
NeilBrown3adc28d2014-09-30 15:46:41 +10007548 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007549 }
7550
NeilBrown1ca69c42012-10-11 13:37:33 +11007551 /* Some actions do not require the mutex */
7552 switch (cmd) {
7553 case GET_ARRAY_INFO:
7554 if (!mddev->raid_disks && !mddev->external)
7555 err = -ENODEV;
7556 else
7557 err = get_array_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007558 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007559
7560 case GET_DISK_INFO:
7561 if (!mddev->raid_disks && !mddev->external)
7562 err = -ENODEV;
7563 else
7564 err = get_disk_info(mddev, argp);
NeilBrown3adc28d2014-09-30 15:46:41 +10007565 goto out;
NeilBrown1ca69c42012-10-11 13:37:33 +11007566
7567 case SET_DISK_FAULTY:
7568 err = set_disk_faulty(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007569 goto out;
NeilBrown4af1a042014-12-15 12:57:00 +11007570
7571 case GET_BITMAP_FILE:
7572 err = get_bitmap_file(mddev, argp);
7573 goto out;
7574
NeilBrown1ca69c42012-10-11 13:37:33 +11007575 }
7576
Guoqing Jiang78b990c2020-04-04 23:57:10 +02007577 if (cmd == ADD_NEW_DISK || cmd == HOT_ADD_DISK)
Guoqing Jiangcc1ffe62020-04-04 23:57:08 +02007578 flush_rdev_wq(mddev);
NeilBrowna7a3f082012-12-11 13:35:54 +11007579
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007580 if (cmd == HOT_REMOVE_DISK)
7581 /* need to ensure recovery thread has run */
7582 wait_event_interruptible_timeout(mddev->sb_wait,
7583 !test_bit(MD_RECOVERY_NEEDED,
Shaohua Li82a301c2016-12-08 15:48:18 -08007584 &mddev->recovery),
Hannes Reinecke90f5f7a2013-04-02 08:38:55 +02007585 msecs_to_jiffies(5000));
NeilBrown260fa032013-08-27 16:44:13 +10007586 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7587 /* Need to flush page cache, and ensure no-one else opens
7588 * and writes
7589 */
7590 mutex_lock(&mddev->open_mutex);
NeilBrown9ba3b7f2014-09-09 14:00:15 +10007591 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
NeilBrown260fa032013-08-27 16:44:13 +10007592 mutex_unlock(&mddev->open_mutex);
7593 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007594 goto out;
NeilBrown260fa032013-08-27 16:44:13 +10007595 }
Dae R. Jeongc731b842020-10-22 10:21:28 +09007596 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7597 mutex_unlock(&mddev->open_mutex);
7598 err = -EBUSY;
7599 goto out;
7600 }
NeilBrown065e5192017-04-06 11:16:33 +08007601 did_set_md_closing = true;
NeilBrown260fa032013-08-27 16:44:13 +10007602 mutex_unlock(&mddev->open_mutex);
7603 sync_blockdev(bdev);
7604 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007605 err = mddev_lock(mddev);
7606 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007607 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7608 err, cmd);
NeilBrown3adc28d2014-09-30 15:46:41 +10007609 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007610 }
7611
NeilBrownc02c0ae2012-12-11 13:39:21 +11007612 if (cmd == SET_ARRAY_INFO) {
7613 mdu_array_info_t info;
7614 if (!arg)
7615 memset(&info, 0, sizeof(info));
7616 else if (copy_from_user(&info, argp, sizeof(info))) {
7617 err = -EFAULT;
NeilBrown3adc28d2014-09-30 15:46:41 +10007618 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007619 }
7620 if (mddev->pers) {
7621 err = update_array_info(mddev, &info);
7622 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007623 pr_warn("md: couldn't update array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007624 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007625 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007626 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007627 }
7628 if (!list_empty(&mddev->disks)) {
NeilBrown9d487392016-11-02 14:16:49 +11007629 pr_warn("md: array %s already has disks!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007630 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007631 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007632 }
7633 if (mddev->raid_disks) {
NeilBrown9d487392016-11-02 14:16:49 +11007634 pr_warn("md: array %s already initialised!\n", mdname(mddev));
NeilBrownc02c0ae2012-12-11 13:39:21 +11007635 err = -EBUSY;
NeilBrown3adc28d2014-09-30 15:46:41 +10007636 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007637 }
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007638 err = md_set_array_info(mddev, &info);
NeilBrownc02c0ae2012-12-11 13:39:21 +11007639 if (err) {
NeilBrown9d487392016-11-02 14:16:49 +11007640 pr_warn("md: couldn't set array info. %d\n", err);
NeilBrown3adc28d2014-09-30 15:46:41 +10007641 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007642 }
NeilBrown3adc28d2014-09-30 15:46:41 +10007643 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007644 }
7645
7646 /*
7647 * Commands querying/configuring an existing array:
7648 */
NeilBrown32a76272005-06-21 17:17:14 -07007649 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
NeilBrown3f9d7b02006-12-22 01:11:41 -08007650 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
NeilBrowna17184a2008-02-06 01:39:55 -08007651 if ((!mddev->raid_disks && !mddev->external)
7652 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7653 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7654 && cmd != GET_BITMAP_FILE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007655 err = -ENODEV;
NeilBrown3adc28d2014-09-30 15:46:41 +10007656 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007657 }
7658
7659 /*
7660 * Commands even a read-only array can execute:
7661 */
NeilBrownc02c0ae2012-12-11 13:39:21 +11007662 switch (cmd) {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007663 case RESTART_ARRAY_RW:
7664 err = restart_array(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007665 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007666
7667 case STOP_ARRAY:
7668 err = do_md_stop(mddev, 0, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007669 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007670
7671 case STOP_ARRAY_RO:
7672 err = md_set_readonly(mddev, bdev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007673 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007674
NeilBrown3ea8929d2013-04-24 11:42:41 +10007675 case HOT_REMOVE_DISK:
7676 err = hot_remove_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007677 goto unlock;
NeilBrown3ea8929d2013-04-24 11:42:41 +10007678
NeilBrown7ceb17e2013-04-24 11:42:42 +10007679 case ADD_NEW_DISK:
7680 /* We can support ADD_NEW_DISK on read-only arrays
Wei Fang466ad292016-03-21 19:19:30 +08007681 * only if we are re-adding a preexisting device.
NeilBrown7ceb17e2013-04-24 11:42:42 +10007682 * So require mddev->pers and MD_DISK_SYNC.
7683 */
7684 if (mddev->pers) {
7685 mdu_disk_info_t info;
7686 if (copy_from_user(&info, argp, sizeof(info)))
7687 err = -EFAULT;
7688 else if (!(info.state & (1<<MD_DISK_SYNC)))
7689 /* Need to clear read-only for this */
7690 break;
7691 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007692 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007693 goto unlock;
NeilBrown7ceb17e2013-04-24 11:42:42 +10007694 }
7695 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007696 }
7697
7698 /*
7699 * The remaining ioctls are changing the state of the
NeilBrownf91de922005-11-08 21:39:36 -08007700 * superblock, so we do not allow them on read-only arrays.
Linus Torvalds1da177e2005-04-16 15:20:36 -07007701 */
NeilBrown326eb172014-09-30 15:36:28 +10007702 if (mddev->ro && mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08007703 if (mddev->ro == 2) {
7704 mddev->ro = 0;
NeilBrown00bcb4a2010-06-01 19:37:23 +10007705 sysfs_notify_dirent_safe(mddev->sysfs_state);
Neil Brown0fd62b82008-06-28 08:31:36 +10007706 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
NeilBrownf3378b42013-02-28 11:59:03 +11007707 /* mddev_unlock will wake thread */
7708 /* If a device failed while we were read-only, we
7709 * need to make sure the metadata is updated now.
7710 */
Shaohua Li29530792016-12-08 15:48:19 -08007711 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
NeilBrownf3378b42013-02-28 11:59:03 +11007712 mddev_unlock(mddev);
7713 wait_event(mddev->sb_wait,
Shaohua Li29530792016-12-08 15:48:19 -08007714 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7715 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown29f097c2013-11-14 17:54:51 +11007716 mddev_lock_nointr(mddev);
NeilBrownf3378b42013-02-28 11:59:03 +11007717 }
NeilBrownf91de922005-11-08 21:39:36 -08007718 } else {
7719 err = -EROFS;
NeilBrown3adc28d2014-09-30 15:46:41 +10007720 goto unlock;
NeilBrownf91de922005-11-08 21:39:36 -08007721 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007722 }
7723
NeilBrownc02c0ae2012-12-11 13:39:21 +11007724 switch (cmd) {
7725 case ADD_NEW_DISK:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007726 {
NeilBrownc02c0ae2012-12-11 13:39:21 +11007727 mdu_disk_info_t info;
7728 if (copy_from_user(&info, argp, sizeof(info)))
7729 err = -EFAULT;
7730 else
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007731 err = md_add_new_disk(mddev, &info);
NeilBrown3adc28d2014-09-30 15:46:41 +10007732 goto unlock;
NeilBrownc02c0ae2012-12-11 13:39:21 +11007733 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007734
Goldwyn Rodrigues1aee41f2014-10-29 18:51:31 -05007735 case CLUSTERED_DISK_NACK:
7736 if (mddev_is_clustered(mddev))
7737 md_cluster_ops->new_disk_ack(mddev, false);
7738 else
7739 err = -EINVAL;
7740 goto unlock;
7741
NeilBrownc02c0ae2012-12-11 13:39:21 +11007742 case HOT_ADD_DISK:
7743 err = hot_add_disk(mddev, new_decode_dev(arg));
NeilBrown3adc28d2014-09-30 15:46:41 +10007744 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007745
NeilBrownc02c0ae2012-12-11 13:39:21 +11007746 case RUN_ARRAY:
7747 err = do_md_run(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007748 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007749
NeilBrownc02c0ae2012-12-11 13:39:21 +11007750 case SET_BITMAP_FILE:
7751 err = set_bitmap_file(mddev, (int)arg);
NeilBrown3adc28d2014-09-30 15:46:41 +10007752 goto unlock;
NeilBrown32a76272005-06-21 17:17:14 -07007753
NeilBrownc02c0ae2012-12-11 13:39:21 +11007754 default:
7755 err = -EINVAL;
NeilBrown3adc28d2014-09-30 15:46:41 +10007756 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007757 }
7758
NeilBrown3adc28d2014-09-30 15:46:41 +10007759unlock:
NeilBrownd3374822009-01-09 08:31:10 +11007760 if (mddev->hold_active == UNTIL_IOCTL &&
7761 err != -EINVAL)
7762 mddev->hold_active = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007763 mddev_unlock(mddev);
NeilBrown3adc28d2014-09-30 15:46:41 +10007764out:
NeilBrown065e5192017-04-06 11:16:33 +08007765 if (did_set_md_closing)
7766 clear_bit(MD_CLOSING, &mddev->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007767 return err;
7768}
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007769#ifdef CONFIG_COMPAT
7770static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7771 unsigned int cmd, unsigned long arg)
7772{
7773 switch (cmd) {
7774 case HOT_REMOVE_DISK:
7775 case HOT_ADD_DISK:
7776 case SET_DISK_FAULTY:
7777 case SET_BITMAP_FILE:
7778 /* These take in integer arg, do not convert */
7779 break;
7780 default:
7781 arg = (unsigned long)compat_ptr(arg);
7782 break;
7783 }
7784
7785 return md_ioctl(bdev, mode, cmd, arg);
7786}
7787#endif /* CONFIG_COMPAT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007788
Christoph Hellwig118cf082020-11-03 11:00:13 +01007789static int md_set_read_only(struct block_device *bdev, bool ro)
7790{
7791 struct mddev *mddev = bdev->bd_disk->private_data;
7792 int err;
7793
7794 err = mddev_lock(mddev);
7795 if (err)
7796 return err;
7797
7798 if (!mddev->raid_disks && !mddev->external) {
7799 err = -ENODEV;
7800 goto out_unlock;
7801 }
7802
7803 /*
7804 * Transitioning to read-auto need only happen for arrays that call
7805 * md_write_start and which are not ready for writes yet.
7806 */
7807 if (!ro && mddev->ro == 1 && mddev->pers) {
7808 err = restart_array(mddev);
7809 if (err)
7810 goto out_unlock;
7811 mddev->ro = 2;
7812 }
7813
7814out_unlock:
7815 mddev_unlock(mddev);
7816 return err;
7817}
7818
Al Viroa39907f2008-03-02 10:31:15 -05007819static int md_open(struct block_device *bdev, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007820{
7821 /*
7822 * Succeed if we can lock the mddev, which confirms that
7823 * it isn't being stopped right now.
7824 */
NeilBrownfd01b882011-10-11 16:47:53 +11007825 struct mddev *mddev = mddev_find(bdev->bd_dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007826 int err;
7827
Yuanhan Liu0c098222012-05-22 13:55:32 +10007828 if (!mddev)
7829 return -ENODEV;
7830
NeilBrownd3374822009-01-09 08:31:10 +11007831 if (mddev->gendisk != bdev->bd_disk) {
7832 /* we are racing with mddev_put which is discarding this
7833 * bd_disk.
7834 */
7835 mddev_put(mddev);
7836 /* Wait until bdev->bd_disk is definitely gone */
Guoqing Jiangf6766ff2020-04-04 23:57:09 +02007837 if (work_pending(&mddev->del_work))
7838 flush_workqueue(md_misc_wq);
NeilBrownd3374822009-01-09 08:31:10 +11007839 /* Then retry the open from the top */
7840 return -ERESTARTSYS;
7841 }
7842 BUG_ON(mddev != bdev->bd_disk->private_data);
7843
NeilBrownc8c00a62009-08-10 12:50:52 +10007844 if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007845 goto out;
7846
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007847 if (test_bit(MD_CLOSING, &mddev->flags)) {
7848 mutex_unlock(&mddev->open_mutex);
NeilBrowne2342ca2016-12-05 16:40:50 +11007849 err = -ENODEV;
7850 goto out;
Guoqing Jiangaf8d8e62016-08-12 13:42:37 +08007851 }
7852
Linus Torvalds1da177e2005-04-16 15:20:36 -07007853 err = 0;
NeilBrownf2ea68c2008-07-21 17:05:25 +10007854 atomic_inc(&mddev->openers);
NeilBrownc8c00a62009-08-10 12:50:52 +10007855 mutex_unlock(&mddev->open_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856
Christoph Hellwig818077d2020-09-08 16:53:43 +02007857 bdev_check_media_change(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007858 out:
NeilBrowne2342ca2016-12-05 16:40:50 +11007859 if (err)
7860 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007861 return err;
7862}
7863
Al Virodb2a1442013-05-05 21:52:57 -04007864static void md_release(struct gendisk *disk, fmode_t mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007865{
NeilBrownf72ffdd2014-09-30 14:23:59 +10007866 struct mddev *mddev = disk->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007867
Eric Sesterhenn52e5f9d2006-10-03 23:33:23 +02007868 BUG_ON(!mddev);
NeilBrownf2ea68c2008-07-21 17:05:25 +10007869 atomic_dec(&mddev->openers);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007870 mddev_put(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007871}
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007872
Christoph Hellwiga564e232020-07-08 14:25:41 +02007873static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007874{
NeilBrownfd01b882011-10-11 16:47:53 +11007875 struct mddev *mddev = disk->private_data;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007876 unsigned int ret = 0;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007877
Christoph Hellwiga564e232020-07-08 14:25:41 +02007878 if (mddev->changed)
7879 ret = DISK_EVENT_MEDIA_CHANGE;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007880 mddev->changed = 0;
Christoph Hellwiga564e232020-07-08 14:25:41 +02007881 return ret;
NeilBrownf0b4f7e2011-02-24 17:26:41 +11007882}
Christoph Hellwiga564e232020-07-08 14:25:41 +02007883
Christoph Hellwig7e0adbf2020-06-07 17:31:19 +02007884const struct block_device_operations md_fops =
Linus Torvalds1da177e2005-04-16 15:20:36 -07007885{
7886 .owner = THIS_MODULE,
Christoph Hellwigc62b37d2020-07-01 10:59:43 +02007887 .submit_bio = md_submit_bio,
Al Viroa39907f2008-03-02 10:31:15 -05007888 .open = md_open,
7889 .release = md_release,
NeilBrownb492b852009-05-26 12:57:36 +10007890 .ioctl = md_ioctl,
Arnd Bergmannaa98aa32009-12-14 12:50:05 +11007891#ifdef CONFIG_COMPAT
7892 .compat_ioctl = md_compat_ioctl,
7893#endif
Christoph Hellwiga885c8c2006-01-08 01:02:50 -08007894 .getgeo = md_getgeo,
Christoph Hellwiga564e232020-07-08 14:25:41 +02007895 .check_events = md_check_events,
Christoph Hellwig118cf082020-11-03 11:00:13 +01007896 .set_read_only = md_set_read_only,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007897};
7898
NeilBrownf72ffdd2014-09-30 14:23:59 +10007899static int md_thread(void *arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007900{
NeilBrown2b8bf342011-10-11 16:48:23 +11007901 struct md_thread *thread = arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007902
Linus Torvalds1da177e2005-04-16 15:20:36 -07007903 /*
7904 * md_thread is a 'system-thread', it's priority should be very
7905 * high. We avoid resource deadlocks individually in each
7906 * raid personality. (RAID5 does preallocation) We also use RR and
7907 * the very same RT priority as kswapd, thus we will never get
7908 * into a priority inversion deadlock.
7909 *
7910 * we definitely have to have equal or higher priority than
7911 * bdflush, otherwise bdflush will deadlock if there are too
7912 * many dirty RAID5 blocks.
7913 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07007914
NeilBrown6985c432005-10-19 21:23:47 -07007915 allow_signal(SIGKILL);
NeilBrowna6fb0932005-09-09 16:23:56 -07007916 while (!kthread_should_stop()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007917
NeilBrown93588e22005-11-15 00:09:12 -08007918 /* We need to wait INTERRUPTIBLE so that
7919 * we don't add to the load-average.
7920 * That means we need to be sure no signals are
7921 * pending
7922 */
7923 if (signal_pending(current))
7924 flush_signals(current);
7925
7926 wait_event_interruptible_timeout
7927 (thread->wqueue,
7928 test_bit(THREAD_WAKEUP, &thread->flags)
Shaohua Lice1ccd02016-11-21 10:29:18 -08007929 || kthread_should_stop() || kthread_should_park(),
NeilBrown93588e22005-11-15 00:09:12 -08007930 thread->timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007931
NeilBrown6c987912011-01-14 09:13:53 +11007932 clear_bit(THREAD_WAKEUP, &thread->flags);
Shaohua Lice1ccd02016-11-21 10:29:18 -08007933 if (kthread_should_park())
7934 kthread_parkme();
NeilBrown6c987912011-01-14 09:13:53 +11007935 if (!kthread_should_stop())
Shaohua Li4ed87312012-10-11 13:34:00 +11007936 thread->run(thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007937 }
NeilBrowna6fb0932005-09-09 16:23:56 -07007938
Linus Torvalds1da177e2005-04-16 15:20:36 -07007939 return 0;
7940}
7941
NeilBrown2b8bf342011-10-11 16:48:23 +11007942void md_wakeup_thread(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943{
7944 if (thread) {
NeilBrown36a4e1f2011-10-07 14:23:17 +11007945 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
Guoqing Jiangd1d90142017-10-09 10:32:48 +08007946 set_bit(THREAD_WAKEUP, &thread->flags);
7947 wake_up(&thread->wqueue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007948 }
7949}
NeilBrown6c144d32014-09-30 16:15:38 +10007950EXPORT_SYMBOL(md_wakeup_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007951
Shaohua Li4ed87312012-10-11 13:34:00 +11007952struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7953 struct mddev *mddev, const char *name)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007954{
NeilBrown2b8bf342011-10-11 16:48:23 +11007955 struct md_thread *thread;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007956
NeilBrown2b8bf342011-10-11 16:48:23 +11007957 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007958 if (!thread)
7959 return NULL;
7960
Linus Torvalds1da177e2005-04-16 15:20:36 -07007961 init_waitqueue_head(&thread->wqueue);
7962
Linus Torvalds1da177e2005-04-16 15:20:36 -07007963 thread->run = run;
7964 thread->mddev = mddev;
NeilBrown32a76272005-06-21 17:17:14 -07007965 thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown0da3c612009-09-23 18:09:45 +10007966 thread->tsk = kthread_run(md_thread, thread,
7967 "%s_%s",
7968 mdname(thread->mddev),
NeilBrown02326052012-07-03 15:56:52 +10007969 name);
NeilBrowna6fb0932005-09-09 16:23:56 -07007970 if (IS_ERR(thread->tsk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007971 kfree(thread);
7972 return NULL;
7973 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007974 return thread;
7975}
NeilBrown6c144d32014-09-30 16:15:38 +10007976EXPORT_SYMBOL(md_register_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007977
NeilBrown2b8bf342011-10-11 16:48:23 +11007978void md_unregister_thread(struct md_thread **threadp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007979{
NeilBrown2b8bf342011-10-11 16:48:23 +11007980 struct md_thread *thread = *threadp;
NeilBrowne0cf8f02009-03-31 14:39:39 +11007981 if (!thread)
7982 return;
NeilBrown36a4e1f2011-10-07 14:23:17 +11007983 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
NeilBrown01f96c02011-09-21 15:30:20 +10007984 /* Locking ensures that mddev_unlock does not wake_up a
7985 * non-existent thread
7986 */
7987 spin_lock(&pers_lock);
7988 *threadp = NULL;
7989 spin_unlock(&pers_lock);
NeilBrowna6fb0932005-09-09 16:23:56 -07007990
7991 kthread_stop(thread->tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007992 kfree(thread);
7993}
NeilBrown6c144d32014-09-30 16:15:38 +10007994EXPORT_SYMBOL(md_unregister_thread);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007995
NeilBrownfd01b882011-10-11 16:47:53 +11007996void md_error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007997{
NeilBrownb2d444d2005-11-08 21:39:31 -08007998 if (!rdev || test_bit(Faulty, &rdev->flags))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007999 return;
Dan Williams6bfe0b42008-04-30 00:52:32 -07008000
NeilBrownde393cd2011-07-28 11:31:48 +10008001 if (!mddev->pers || !mddev->pers->error_handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008002 return;
8003 mddev->pers->error_handler(mddev,rdev);
Neil Brown72a23c22008-06-28 08:31:41 +10008004 if (mddev->degraded)
8005 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
NeilBrown00bcb4a2010-06-01 19:37:23 +10008006 sysfs_notify_dirent_safe(rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008007 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8008 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8009 md_wakeup_thread(mddev->thread);
NeilBrown768a4182010-07-26 11:49:55 +10008010 if (mddev->event_work.func)
Tejun Heoe804ac72010-10-15 15:36:08 +02008011 queue_work(md_misc_wq, &mddev->event_work);
Guoqing Jiangbb9ef712015-12-28 10:46:38 +08008012 md_new_event(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008013}
NeilBrown6c144d32014-09-30 16:15:38 +10008014EXPORT_SYMBOL(md_error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008015
8016/* seq_file implementation /proc/mdstat */
8017
8018static void status_unused(struct seq_file *seq)
8019{
8020 int i = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008021 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008022
8023 seq_printf(seq, "unused devices: ");
8024
Cheng Renquan159ec1f2009-01-09 08:31:08 +11008025 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008026 char b[BDEVNAME_SIZE];
8027 i++;
8028 seq_printf(seq, "%s ",
8029 bdevname(rdev->bdev,b));
8030 }
8031 if (!i)
8032 seq_printf(seq, "<none>");
8033
8034 seq_printf(seq, "\n");
8035}
8036
NeilBrownf7851be2015-07-02 17:12:58 +10008037static int status_resync(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008038{
NeilBrowndd71cf62009-05-07 12:49:35 +10008039 sector_t max_sectors, resync, res;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008040 unsigned long dt, db = 0;
8041 sector_t rt, curr_mark_cnt, resync_mark_cnt;
8042 int scale, recovery_active;
NeilBrown4588b422006-03-27 01:18:04 -08008043 unsigned int per_milli;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008044
NeilBrownc804cde2012-05-21 09:28:33 +10008045 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8046 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
NeilBrowndd71cf62009-05-07 12:49:35 +10008047 max_sectors = mddev->resync_max_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008048 else
NeilBrowndd71cf62009-05-07 12:49:35 +10008049 max_sectors = mddev->dev_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008050
NeilBrownf7851be2015-07-02 17:12:58 +10008051 resync = mddev->curr_resync;
8052 if (resync <= 3) {
8053 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8054 /* Still cleaning up */
8055 resync = max_sectors;
Nate Daileyd2e2ec82017-11-30 11:33:30 -05008056 } else if (resync > max_sectors)
8057 resync = max_sectors;
8058 else
NeilBrownf7851be2015-07-02 17:12:58 +10008059 resync -= atomic_read(&mddev->recovery_active);
8060
8061 if (resync == 0) {
Guoqing Jiang0357ba22018-07-02 16:26:25 +08008062 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8063 struct md_rdev *rdev;
8064
8065 rdev_for_each(rdev, mddev)
8066 if (rdev->raid_disk >= 0 &&
8067 !test_bit(Faulty, &rdev->flags) &&
8068 rdev->recovery_offset != MaxSector &&
8069 rdev->recovery_offset) {
8070 seq_printf(seq, "\trecover=REMOTE");
8071 return 1;
8072 }
8073 if (mddev->reshape_position != MaxSector)
8074 seq_printf(seq, "\treshape=REMOTE");
8075 else
8076 seq_printf(seq, "\tresync=REMOTE");
8077 return 1;
8078 }
NeilBrownf7851be2015-07-02 17:12:58 +10008079 if (mddev->recovery_cp < MaxSector) {
8080 seq_printf(seq, "\tresync=PENDING");
8081 return 1;
8082 }
8083 return 0;
8084 }
8085 if (resync < 3) {
8086 seq_printf(seq, "\tresync=DELAYED");
8087 return 1;
8088 }
8089
NeilBrown403df472014-09-30 15:52:29 +10008090 WARN_ON(max_sectors == 0);
NeilBrown4588b422006-03-27 01:18:04 -08008091 /* Pick 'scale' such that (resync>>scale)*1000 will fit
NeilBrowndd71cf62009-05-07 12:49:35 +10008092 * in a sector_t, and (max_sectors>>scale) will fit in a
NeilBrown4588b422006-03-27 01:18:04 -08008093 * u32, as those are the requirements for sector_div.
8094 * Thus 'scale' must be at least 10
8095 */
8096 scale = 10;
8097 if (sizeof(sector_t) > sizeof(unsigned long)) {
NeilBrowndd71cf62009-05-07 12:49:35 +10008098 while ( max_sectors/2 > (1ULL<<(scale+32)))
NeilBrown4588b422006-03-27 01:18:04 -08008099 scale++;
8100 }
8101 res = (resync>>scale)*1000;
NeilBrowndd71cf62009-05-07 12:49:35 +10008102 sector_div(res, (u32)((max_sectors>>scale)+1));
NeilBrown4588b422006-03-27 01:18:04 -08008103
8104 per_milli = res;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008105 {
NeilBrown4588b422006-03-27 01:18:04 -08008106 int i, x = per_milli/50, y = 20-x;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008107 seq_printf(seq, "[");
8108 for (i = 0; i < x; i++)
8109 seq_printf(seq, "=");
8110 seq_printf(seq, ">");
8111 for (i = 0; i < y; i++)
8112 seq_printf(seq, ".");
8113 seq_printf(seq, "] ");
8114 }
NeilBrown4588b422006-03-27 01:18:04 -08008115 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
NeilBrownccfcc3c2006-03-27 01:18:09 -08008116 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8117 "reshape" :
NeilBrown61df9d92006-10-03 01:15:57 -07008118 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8119 "check" :
8120 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8121 "resync" : "recovery"))),
8122 per_milli/10, per_milli % 10,
NeilBrowndd71cf62009-05-07 12:49:35 +10008123 (unsigned long long) resync/2,
8124 (unsigned long long) max_sectors/2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008125
8126 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07008127 * dt: time from mark until now
8128 * db: blocks written from mark until now
8129 * rt: remaining time
NeilBrowndd71cf62009-05-07 12:49:35 +10008130 *
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008131 * rt is a sector_t, which is always 64bit now. We are keeping
8132 * the original algorithm, but it is not really necessary.
8133 *
8134 * Original algorithm:
8135 * So we divide before multiply in case it is 32bit and close
8136 * to the limit.
8137 * We scale the divisor (db) by 32 to avoid losing precision
8138 * near the end of resync when the number of remaining sectors
8139 * is close to 'db'.
8140 * We then divide rt by 32 after multiplying by db to compensate.
8141 * The '+1' avoids division by zero if db is very small.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008142 */
8143 dt = ((jiffies - mddev->resync_mark) / HZ);
8144 if (!dt) dt++;
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008145
8146 curr_mark_cnt = mddev->curr_mark_cnt;
8147 recovery_active = atomic_read(&mddev->recovery_active);
8148 resync_mark_cnt = mddev->resync_mark_cnt;
8149
8150 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8151 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008152
NeilBrowndd71cf62009-05-07 12:49:35 +10008153 rt = max_sectors - resync; /* number of remaining sectors */
Mariusz Tkaczyk9642fa72019-06-13 16:11:41 +02008154 rt = div64_u64(rt, db/32+1);
NeilBrowndd71cf62009-05-07 12:49:35 +10008155 rt *= dt;
8156 rt >>= 5;
8157
8158 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8159 ((unsigned long)rt % 60)/6);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008160
NeilBrownff4e8d92006-07-10 04:44:16 -07008161 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
NeilBrownf7851be2015-07-02 17:12:58 +10008162 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008163}
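/*
 * Worked example of the estimate above (all numbers made up): with
 * max_sectors = 1000000 and resync = 400000, scale stays 10, so
 * per_milli = ((400000>>10)*1000) / ((1000000>>10)+1) = 390000/977 = 399,
 * shown as "39.9%".  If the mark window gives dt = 10s and db = 200000
 * sectors, speed = db/2/dt = 10000K/sec, and the remaining 600000 sectors
 * yield rt = (600000/(200000/32+1))*10 >> 5 = 29s (close to the exact
 * 600000*10/200000 = 30s), shown as "finish=0.4min".
 */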
8164
8165static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8166{
8167 struct list_head *tmp;
8168 loff_t l = *pos;
NeilBrownfd01b882011-10-11 16:47:53 +11008169 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008170
8171 if (l >= 0x10000)
8172 return NULL;
8173 if (!l--)
8174 /* header */
8175 return (void*)1;
8176
8177 spin_lock(&all_mddevs_lock);
8178 list_for_each(tmp,&all_mddevs)
8179 if (!l--) {
NeilBrownfd01b882011-10-11 16:47:53 +11008180 mddev = list_entry(tmp, struct mddev, all_mddevs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008181 mddev_get(mddev);
8182 spin_unlock(&all_mddevs_lock);
8183 return mddev;
8184 }
8185 spin_unlock(&all_mddevs_lock);
8186 if (!l--)
8187 return (void*)2;/* tail */
8188 return NULL;
8189}
8190
8191static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8192{
8193 struct list_head *tmp;
NeilBrownfd01b882011-10-11 16:47:53 +11008194 struct mddev *next_mddev, *mddev = v;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008195
Linus Torvalds1da177e2005-04-16 15:20:36 -07008196 ++*pos;
8197 if (v == (void*)2)
8198 return NULL;
8199
8200 spin_lock(&all_mddevs_lock);
8201 if (v == (void*)1)
8202 tmp = all_mddevs.next;
8203 else
8204 tmp = mddev->all_mddevs.next;
8205 if (tmp != &all_mddevs)
NeilBrownfd01b882011-10-11 16:47:53 +11008206 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008207 else {
8208 next_mddev = (void*)2;
8209 *pos = 0x10000;
NeilBrownf72ffdd2014-09-30 14:23:59 +10008210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008211 spin_unlock(&all_mddevs_lock);
8212
8213 if (v != (void*)1)
8214 mddev_put(mddev);
8215 return next_mddev;
8216
8217}
8218
8219static void md_seq_stop(struct seq_file *seq, void *v)
8220{
NeilBrownfd01b882011-10-11 16:47:53 +11008221 struct mddev *mddev = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008222
8223 if (mddev && v != (void*)1 && v != (void*)2)
8224 mddev_put(mddev);
8225}
8226
8227static int md_seq_show(struct seq_file *seq, void *v)
8228{
NeilBrownfd01b882011-10-11 16:47:53 +11008229 struct mddev *mddev = v;
Andre Nolldd8ac332009-03-31 14:33:13 +11008230 sector_t sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11008231 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008232
8233 if (v == (void*)1) {
NeilBrown84fc4b52011-10-11 16:49:58 +11008234 struct md_personality *pers;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008235 seq_printf(seq, "Personalities : ");
8236 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008237 list_for_each_entry(pers, &pers_list, list)
8238 seq_printf(seq, "[%s] ", pers->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008239
8240 spin_unlock(&pers_lock);
8241 seq_printf(seq, "\n");
Kay Sieversf1514632011-07-12 20:48:39 +02008242 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008243 return 0;
8244 }
8245 if (v == (void*)2) {
8246 status_unused(seq);
8247 return 0;
8248 }
8249
NeilBrown36d091f2014-12-15 12:56:58 +11008250 spin_lock(&mddev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008251 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8252 seq_printf(seq, "%s : %sactive", mdname(mddev),
8253 mddev->pers ? "" : "in");
8254 if (mddev->pers) {
NeilBrownf91de922005-11-08 21:39:36 -08008255 if (mddev->ro==1)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008256 seq_printf(seq, " (read-only)");
NeilBrownf91de922005-11-08 21:39:36 -08008257 if (mddev->ro==2)
NeilBrown52720ae2008-03-10 11:43:47 -07008258 seq_printf(seq, " (auto-read-only)");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008259 seq_printf(seq, " %s", mddev->pers->name);
8260 }
8261
Andre Nolldd8ac332009-03-31 14:33:13 +11008262 sectors = 0;
NeilBrownf97fcad2014-12-15 12:56:59 +11008263 rcu_read_lock();
8264 rdev_for_each_rcu(rdev, mddev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008265 char b[BDEVNAME_SIZE];
8266 seq_printf(seq, " %s[%d]",
8267 bdevname(rdev->bdev,b), rdev->desc_nr);
NeilBrown8ddf9ef2005-09-09 16:23:45 -07008268 if (test_bit(WriteMostly, &rdev->flags))
8269 seq_printf(seq, "(W)");
Shaohua Li9efdca12015-10-12 16:59:50 -07008270 if (test_bit(Journal, &rdev->flags))
8271 seq_printf(seq, "(J)");
NeilBrownb2d444d2005-11-08 21:39:31 -08008272 if (test_bit(Faulty, &rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008273 seq_printf(seq, "(F)");
8274 continue;
NeilBrown2d78f8c2011-12-23 10:17:51 +11008275 }
8276 if (rdev->raid_disk < 0)
NeilBrownb325a322005-09-09 16:24:00 -07008277 seq_printf(seq, "(S)"); /* spare */
NeilBrown2d78f8c2011-12-23 10:17:51 +11008278 if (test_bit(Replacement, &rdev->flags))
8279 seq_printf(seq, "(R)");
Andre Nolldd8ac332009-03-31 14:33:13 +11008280 sectors += rdev->sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008281 }
NeilBrownf97fcad2014-12-15 12:56:59 +11008282 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008283
8284 if (!list_empty(&mddev->disks)) {
8285 if (mddev->pers)
8286 seq_printf(seq, "\n %llu blocks",
Andre Nollf233ea52008-07-21 17:05:22 +10008287 (unsigned long long)
8288 mddev->array_sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008289 else
8290 seq_printf(seq, "\n %llu blocks",
Andre Nolldd8ac332009-03-31 14:33:13 +11008291 (unsigned long long)sectors / 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008292 }
NeilBrown1cd6bf12005-09-09 16:24:00 -07008293 if (mddev->persistent) {
8294 if (mddev->major_version != 0 ||
8295 mddev->minor_version != 90) {
8296 seq_printf(seq," super %d.%d",
8297 mddev->major_version,
8298 mddev->minor_version);
8299 }
NeilBrowne6910632008-02-06 01:39:51 -08008300 } else if (mddev->external)
8301 seq_printf(seq, " super external:%s",
8302 mddev->metadata_type);
8303 else
NeilBrown1cd6bf12005-09-09 16:24:00 -07008304 seq_printf(seq, " super non-persistent");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008305
8306 if (mddev->pers) {
NeilBrownd710e132008-10-13 11:55:12 +11008307 mddev->pers->status(seq, mddev);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008308 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008309 if (mddev->pers->sync_request) {
NeilBrownf7851be2015-07-02 17:12:58 +10008310 if (status_resync(seq, mddev))
NeilBrown8e1b39d2005-11-08 21:39:41 -08008311 seq_printf(seq, "\n ");
NeilBrown8e1b39d2005-11-08 21:39:41 -08008312 }
NeilBrown32a76272005-06-21 17:17:14 -07008313 } else
8314 seq_printf(seq, "\n ");
8315
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07008316 md_bitmap_status(seq, mddev->bitmap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008317
8318 seq_printf(seq, "\n");
8319 }
NeilBrown36d091f2014-12-15 12:56:58 +11008320 spin_unlock(&mddev->lock);
NeilBrownf72ffdd2014-09-30 14:23:59 +10008321
Linus Torvalds1da177e2005-04-16 15:20:36 -07008322 return 0;
8323}
8324
Jan Engelhardt110518b2009-05-07 12:49:37 +10008325static const struct seq_operations md_seq_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008326 .start = md_seq_start,
8327 .next = md_seq_next,
8328 .stop = md_seq_stop,
8329 .show = md_seq_show,
8330};
8331
8332static int md_seq_open(struct inode *inode, struct file *file)
8333{
Kay Sieversf1514632011-07-12 20:48:39 +02008334 struct seq_file *seq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008335 int error;
8336
8337 error = seq_open(file, &md_seq_ops);
NeilBrownd7603b72006-01-06 00:20:30 -08008338 if (error)
Kay Sieversf1514632011-07-12 20:48:39 +02008339 return error;
8340
8341 seq = file->private_data;
8342 seq->poll_event = atomic_read(&md_event_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008343 return error;
8344}
8345
NeilBrowne2f23b62014-04-09 14:33:51 +10008346static int md_unloading;
Al Viroafc9a422017-07-03 06:39:46 -04008347static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
NeilBrownd7603b72006-01-06 00:20:30 -08008348{
Kay Sieversf1514632011-07-12 20:48:39 +02008349 struct seq_file *seq = filp->private_data;
Al Viroafc9a422017-07-03 06:39:46 -04008350 __poll_t mask;
NeilBrownd7603b72006-01-06 00:20:30 -08008351
NeilBrowne2f23b62014-04-09 14:33:51 +10008352 if (md_unloading)
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008353 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008354 poll_wait(filp, &md_event_waiters, wait);
8355
8356 /* always allow read */
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008357 mask = EPOLLIN | EPOLLRDNORM;
NeilBrownd7603b72006-01-06 00:20:30 -08008358
Kay Sieversf1514632011-07-12 20:48:39 +02008359 if (seq->poll_event != atomic_read(&md_event_count))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08008360 mask |= EPOLLERR | EPOLLPRI;
NeilBrownd7603b72006-01-06 00:20:30 -08008361 return mask;
8362}
8363
Alexey Dobriyan97a32532020-02-03 17:37:17 -08008364static const struct proc_ops mdstat_proc_ops = {
8365 .proc_open = md_seq_open,
8366 .proc_read = seq_read,
8367 .proc_lseek = seq_lseek,
8368 .proc_release = seq_release,
8369 .proc_poll = mdstat_poll,
Linus Torvalds1da177e2005-04-16 15:20:36 -07008370};
8371
NeilBrown84fc4b52011-10-11 16:49:58 +11008372int register_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008373{
NeilBrown9d487392016-11-02 14:16:49 +11008374 pr_debug("md: %s personality registered for level %d\n",
8375 p->name, p->level);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008376 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008377 list_add_tail(&p->list, &pers_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008378 spin_unlock(&pers_lock);
8379 return 0;
8380}
NeilBrown6c144d32014-09-30 16:15:38 +10008381EXPORT_SYMBOL(register_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008382
NeilBrown84fc4b52011-10-11 16:49:58 +11008383int unregister_md_personality(struct md_personality *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008384{
NeilBrown9d487392016-11-02 14:16:49 +11008385 pr_debug("md: %s personality unregistered\n", p->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008386 spin_lock(&pers_lock);
NeilBrown2604b702006-01-06 00:20:36 -08008387 list_del_init(&p->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008388 spin_unlock(&pers_lock);
8389 return 0;
8390}
NeilBrown6c144d32014-09-30 16:15:38 +10008391EXPORT_SYMBOL(unregister_md_personality);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008392
NeilBrown6022e752015-08-13 12:32:55 +10008393int register_md_cluster_operations(struct md_cluster_operations *ops,
8394 struct module *module)
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008395{
NeilBrown6022e752015-08-13 12:32:55 +10008396 int ret = 0;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008397 spin_lock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008398 if (md_cluster_ops != NULL)
8399 ret = -EALREADY;
8400 else {
8401 md_cluster_ops = ops;
8402 md_cluster_mod = module;
8403 }
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008404 spin_unlock(&pers_lock);
NeilBrown6022e752015-08-13 12:32:55 +10008405 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008406}
8407EXPORT_SYMBOL(register_md_cluster_operations);
8408
8409int unregister_md_cluster_operations(void)
8410{
8411 spin_lock(&pers_lock);
8412 md_cluster_ops = NULL;
8413 spin_unlock(&pers_lock);
8414 return 0;
8415}
8416EXPORT_SYMBOL(unregister_md_cluster_operations);
8417
8418int md_setup_cluster(struct mddev *mddev, int nodes)
8419{
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008420 int ret;
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008421 if (!md_cluster_ops)
8422 request_module("md-cluster");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008423 spin_lock(&pers_lock);
Guoqing Jiang47a7b0d2016-09-04 22:17:28 -04008424 /* ensure module won't be unloaded */
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008425 if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
NeilBrown9d487392016-11-02 14:16:49 +11008426 pr_warn("can't find md-cluster module or get its reference.\n");
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008427 spin_unlock(&pers_lock);
8428 return -ENOENT;
8429 }
8430 spin_unlock(&pers_lock);
8431
Zhao Heming7c9d5c52020-07-21 02:08:52 +08008432 ret = md_cluster_ops->join(mddev, nodes);
8433 if (!ret)
8434 mddev->safemode_delay = 0;
8435 return ret;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008436}
8437
8438void md_cluster_stop(struct mddev *mddev)
8439{
Goldwyn Rodriguesc4ce8672014-03-29 10:20:02 -05008440 if (!md_cluster_ops)
8441 return;
Goldwyn Rodriguesedb39c92014-03-29 10:01:53 -05008442 md_cluster_ops->leave(mddev);
8443 module_put(md_cluster_mod);
8444}
8445
NeilBrownfd01b882011-10-11 16:47:53 +11008446static int is_mddev_idle(struct mddev *mddev, int init)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008447{
NeilBrownf72ffdd2014-09-30 14:23:59 +10008448 struct md_rdev *rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008449 int idle;
NeilBrowneea1bf32009-03-31 14:27:02 +11008450 int curr_events;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008451
8452 idle = 1;
NeilBrown4b809912008-07-21 17:05:25 +10008453 rcu_read_lock();
8454 rdev_for_each_rcu(rdev, mddev) {
Christoph Hellwig4245e522020-09-03 07:40:59 +02008455 struct gendisk *disk = rdev->bdev->bd_disk;
Christoph Hellwig8446fe92020-11-24 09:36:54 +01008456 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
NeilBrowneea1bf32009-03-31 14:27:02 +11008457 atomic_read(&disk->sync_io);
NeilBrown713f6ab2007-07-17 04:06:12 -07008458 /* sync IO will cause sync_io to increase before the disk_stats
8459 * as sync_io is counted when a request starts, and
8460 * disk_stats is counted when it completes.
8461 * So resync activity will cause curr_events to be smaller than
8462 * when there was no such activity.
8463 * non-sync IO will cause disk_stats to increase without
8464 * increasing sync_io so curr_events will (eventually)
8465 * be larger than it was before. Once it becomes
8466 * substantially larger, the test below will cause
8467 * the array to appear non-idle, and resync will slow
8468 * down.
8469 * If there is a lot of outstanding resync activity when
8470 * we set last_events to curr_events, then all that activity
8471 * completing might cause the array to appear non-idle
8472 * and resync will be slowed down even though there might
8473 * not have been non-resync activity. This will only
8474 * happen once though. 'last_events' will soon reflect
8475 * the state where there is little or no outstanding
8476 * resync requests, and further resync activity will
8477 * always make curr_events less than last_events.
NeilBrownc0e48522005-11-18 01:11:01 -08008478 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008479 */
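		/*
		 * Hypothetical figures: if part_stat sectors advanced by
		 * 1000 since the last check while sync_io advanced by 950,
		 * curr_events ends up 50 ahead of last_events -- within the
		 * 64-sector slack, so still "idle".  Sustained non-sync IO
		 * pushes the gap past 64 and idle is cleared below.
		 */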
NeilBrowneea1bf32009-03-31 14:27:02 +11008480 if (init || curr_events - rdev->last_events > 64) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008481 rdev->last_events = curr_events;
8482 idle = 0;
8483 }
8484 }
NeilBrown4b809912008-07-21 17:05:25 +10008485 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07008486 return idle;
8487}
8488
NeilBrownfd01b882011-10-11 16:47:53 +11008489void md_done_sync(struct mddev *mddev, int blocks, int ok)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008490{
8491 /* another "blocks" (512byte) blocks have been synced */
8492 atomic_sub(blocks, &mddev->recovery_active);
8493 wake_up(&mddev->recovery_wait);
8494 if (!ok) {
NeilBrowndfc70642008-05-23 13:04:39 -07008495 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
majianpeng0a19caa2012-11-19 19:57:34 +08008496 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008497 md_wakeup_thread(mddev->thread);
8498 // stop recovery, signal do_sync ....
8499 }
8500}
NeilBrown6c144d32014-09-30 16:15:38 +10008501EXPORT_SYMBOL(md_done_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008502
NeilBrown06d91a52005-06-21 17:17:12 -07008503/* md_write_start(mddev, bi)
8504 * If we need to update some array metadata (e.g. 'active' flag
NeilBrown3d310eb2005-06-21 17:17:26 -07008505 * in superblock) before writing, schedule a superblock update
8506 * and wait for it to complete.
NeilBrowncc27b0c2017-06-05 16:49:39 +10008507 * A return value of 'false' means that the write wasn't recorded
8508 * and cannot proceed as the array is being suspended.
NeilBrown06d91a52005-06-21 17:17:12 -07008509 */
NeilBrowncc27b0c2017-06-05 16:49:39 +10008510bool md_write_start(struct mddev *mddev, struct bio *bi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008511{
Neil Brown0fd62b82008-06-28 08:31:36 +10008512 int did_change = 0;
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008513
NeilBrown06d91a52005-06-21 17:17:12 -07008514 if (bio_data_dir(bi) != WRITE)
NeilBrowncc27b0c2017-06-05 16:49:39 +10008515 return true;
NeilBrown06d91a52005-06-21 17:17:12 -07008516
NeilBrownf91de922005-11-08 21:39:36 -08008517 BUG_ON(mddev->ro == 1);
8518 if (mddev->ro == 2) {
8519 /* need to switch to read/write */
8520 mddev->ro = 0;
8521 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8522 md_wakeup_thread(mddev->thread);
NeilBrown25156192008-03-04 14:29:32 -08008523 md_wakeup_thread(mddev->sync_thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008524 did_change = 1;
NeilBrownf91de922005-11-08 21:39:36 -08008525 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008526 rcu_read_lock();
8527 percpu_ref_get(&mddev->writes_pending);
NeilBrown55cc39f2017-03-15 14:05:14 +11008528 smp_mb(); /* Match smp_mb in set_in_sync() */
NeilBrown31a59e32008-04-30 00:52:30 -07008529 if (mddev->safemode == 1)
8530 mddev->safemode = 0;
NeilBrown4ad23a972017-03-15 14:05:14 +11008531 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
NeilBrown81fe48e2017-08-08 16:56:36 +10008532 if (mddev->in_sync || mddev->sync_checkers) {
NeilBrown85572d72014-12-15 12:56:56 +11008533 spin_lock(&mddev->lock);
NeilBrown3d310eb2005-06-21 17:17:26 -07008534 if (mddev->in_sync) {
8535 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008536 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8537 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown3d310eb2005-06-21 17:17:26 -07008538 md_wakeup_thread(mddev->thread);
Neil Brown0fd62b82008-06-28 08:31:36 +10008539 did_change = 1;
NeilBrown3d310eb2005-06-21 17:17:26 -07008540 }
NeilBrown85572d72014-12-15 12:56:56 +11008541 spin_unlock(&mddev->lock);
NeilBrown06d91a52005-06-21 17:17:12 -07008542 }
NeilBrown4ad23a972017-03-15 14:05:14 +11008543 rcu_read_unlock();
Neil Brown0fd62b82008-06-28 08:31:36 +10008544 if (did_change)
NeilBrown00bcb4a2010-06-01 19:37:23 +10008545 sysfs_notify_dirent_safe(mddev->sysfs_state);
Heinz Mauelshagen4b6c1062018-02-02 23:13:19 +01008546 if (!mddev->has_superblocks)
8547 return true;
NeilBrown09a44cc2008-05-23 13:04:36 -07008548 wait_event(mddev->sb_wait,
NeilBrownd47c8ad2017-10-05 16:23:16 +11008549 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8550 mddev->suspended);
NeilBrowncc27b0c2017-06-05 16:49:39 +10008551 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8552 percpu_ref_put(&mddev->writes_pending);
8553 return false;
8554 }
8555 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008556}
NeilBrown6c144d32014-09-30 16:15:38 +10008557EXPORT_SYMBOL(md_write_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008558
NeilBrown49728052017-03-15 14:05:12 +11008559/* md_write_inc can only be called when md_write_start() has
8560 * already been called at least once for the current request.
8561 * It increments the counter and is useful when a single request
8562 * is split into several parts. Each part causes an increment and
8563 * so needs a matching md_write_end().
8564 * Unlike md_write_start(), it is safe to call md_write_inc() inside
8565 * a spinlocked region.
8566 */
8567void md_write_inc(struct mddev *mddev, struct bio *bi)
8568{
8569 if (bio_data_dir(bi) != WRITE)
8570 return;
8571 WARN_ON_ONCE(mddev->in_sync || mddev->ro);
NeilBrown4ad23a972017-03-15 14:05:14 +11008572 percpu_ref_get(&mddev->writes_pending);
NeilBrown49728052017-03-15 14:05:12 +11008573}
8574EXPORT_SYMBOL(md_write_inc);
8575
NeilBrownfd01b882011-10-11 16:47:53 +11008576void md_write_end(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008577{
NeilBrown4ad23a972017-03-15 14:05:14 +11008578 percpu_ref_put(&mddev->writes_pending);
8579
8580 if (mddev->safemode == 2)
8581 md_wakeup_thread(mddev->thread);
8582 else if (mddev->safemode_delay)
8583 /* The roundup() ensures this only performs locking once
8584 * every ->safemode_delay jiffies
8585 */
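		/*
		 * e.g. (hypothetical values) safemode_delay = 200 jiffies,
		 * jiffies = 1234: every write completing in that window
		 * arms the timer for roundup(1234, 200) + 200 = 1600, so
		 * the expiry stays put instead of being pushed back by
		 * each call.
		 */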
8586 mod_timer(&mddev->safemode_timer,
8587 roundup(jiffies, mddev->safemode_delay) +
8588 mddev->safemode_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008589}
NeilBrown4ad23a972017-03-15 14:05:14 +11008590
NeilBrown6c144d32014-09-30 16:15:38 +10008591EXPORT_SYMBOL(md_write_end);
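/*
 * A minimal sketch of how a personality might pair the calls above; the
 * function below is hypothetical, not part of md, and only makes the
 * start/inc/end contract concrete:
 *
 *	static bool toy_make_request(struct mddev *mddev, struct bio *bio)
 *	{
 *		if (!md_write_start(mddev, bio))
 *			return false;	// array suspended, caller must retry
 *		// if the bio is split, each extra fragment takes
 *		// md_write_inc(mddev, bio) before submission ...
 *		// ... and each fragment's completion path calls
 *		// md_write_end(mddev)
 *		return true;
 *	}
 */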
Linus Torvalds1da177e2005-04-16 15:20:36 -07008592
NeilBrown2a2275d2007-01-26 00:57:11 -08008593/* md_allow_write(mddev)
8594 * Calling this ensures that the array is marked 'active' so that writes
8595 * may proceed without blocking. It is important to call this before
8596 * attempting a GFP_KERNEL allocation while holding the mddev lock.
8597 * Must be called with mddev_lock held.
8598 */
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008599void md_allow_write(struct mddev *mddev)
NeilBrown2a2275d2007-01-26 00:57:11 -08008600{
8601 if (!mddev->pers)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008602 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008603 if (mddev->ro)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008604 return;
Neil Brown1a0fd492008-06-28 08:31:27 +10008605 if (!mddev->pers->sync_request)
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008606 return;
NeilBrown2a2275d2007-01-26 00:57:11 -08008607
NeilBrown85572d72014-12-15 12:56:56 +11008608 spin_lock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008609 if (mddev->in_sync) {
8610 mddev->in_sync = 0;
Shaohua Li29530792016-12-08 15:48:19 -08008611 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8612 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
NeilBrown2a2275d2007-01-26 00:57:11 -08008613 if (mddev->safemode_delay &&
8614 mddev->safemode == 0)
8615 mddev->safemode = 1;
NeilBrown85572d72014-12-15 12:56:56 +11008616 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008617 md_update_sb(mddev, 0);
NeilBrown00bcb4a2010-06-01 19:37:23 +10008618 sysfs_notify_dirent_safe(mddev->sysfs_state);
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008619 /* wait for the dirty state to be recorded in the metadata */
8620 wait_event(mddev->sb_wait,
Artur Paszkiewicz2214c262017-05-08 11:56:55 +02008621 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
NeilBrown2a2275d2007-01-26 00:57:11 -08008622 } else
NeilBrown85572d72014-12-15 12:56:56 +11008623 spin_unlock(&mddev->lock);
NeilBrown2a2275d2007-01-26 00:57:11 -08008624}
8625EXPORT_SYMBOL_GPL(md_allow_write);
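/*
 * Minimal usage sketch ('new_conf' is illustrative, not an md structure),
 * assuming the caller holds mddev_lock():
 *
 *	md_allow_write(mddev);
 *	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
 *
 * Calling md_allow_write() first means the GFP_KERNEL allocation can
 * write back dirty pages to the array without blocking on the metadata
 * update we would otherwise still owe.
 */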
8626
Linus Torvalds1da177e2005-04-16 15:20:36 -07008627#define SYNC_MARKS 10
8628#define SYNC_MARK_STEP (3*HZ)
majianpeng54f89342012-10-31 11:59:10 +11008629#define UPDATE_FREQUENCY (5*60*HZ)
Shaohua Li4ed87312012-10-11 13:34:00 +11008630void md_do_sync(struct md_thread *thread)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008631{
Shaohua Li4ed87312012-10-11 13:34:00 +11008632 struct mddev *mddev = thread->mddev;
NeilBrownfd01b882011-10-11 16:47:53 +11008633 struct mddev *mddev2;
Yufen Yue5b521e2019-06-14 15:41:07 -07008634 unsigned int currspeed = 0, window;
Xiao Niac7e50a2014-08-07 09:37:41 -04008635 sector_t max_sectors,j, io_sectors, recovery_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008636 unsigned long mark[SYNC_MARKS];
majianpeng54f89342012-10-31 11:59:10 +11008637 unsigned long update_time;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008638 sector_t mark_cnt[SYNC_MARKS];
8639 int last_mark,m;
8640 struct list_head *tmp;
8641 sector_t last_check;
NeilBrown57afd892005-06-21 17:17:13 -07008642 int skipped = 0;
NeilBrown3cb03002011-10-11 16:45:26 +11008643 struct md_rdev *rdev;
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008644 char *desc, *action = NULL;
majianpeng7c2c57c2012-07-03 12:12:26 +10008645 struct blk_plug plug;
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008646 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008647
8648 /* just incase thread restarts... */
Song Liud5d885f2017-11-19 22:17:01 -08008649 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8650 test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008651 return;
NeilBrown3991b312014-05-28 13:39:23 +10008652 if (mddev->ro) {/* never try to sync a read-only array */
8653 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008654 return;
NeilBrown3991b312014-05-28 13:39:23 +10008655 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008656
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008657 if (mddev_is_clustered(mddev)) {
8658 ret = md_cluster_ops->resync_start(mddev);
8659 if (ret)
8660 goto skip;
8661
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04008662 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
Guoqing Jiang41a9a0d2016-05-02 11:33:08 -04008663 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8664 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8665 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8666 && ((unsigned long long)mddev->curr_resync_completed
8667 < (unsigned long long)mddev->resync_max_sectors))
8668 goto skip;
8669 }
8670
NeilBrown61df9d92006-10-03 01:15:57 -07008671 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008672 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008673 desc = "data-check";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008674 action = "check";
8675 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
NeilBrown61df9d92006-10-03 01:15:57 -07008676 desc = "requested-resync";
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008677 action = "repair";
8678 } else
NeilBrown61df9d92006-10-03 01:15:57 -07008679 desc = "resync";
8680 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8681 desc = "reshape";
8682 else
8683 desc = "recovery";
8684
Jonathan Brassowc4a39552013-06-25 01:23:59 -05008685 mddev->last_sync_action = action ?: desc;
8686
Linus Torvalds1da177e2005-04-16 15:20:36 -07008687 /* we overload curr_resync somewhat here.
8688 * 0 == not engaged in resync at all
8689 * 2 == checking that there is no conflict with another sync
8690 * 1 == like 2, but have yielded to allow conflicting resync to
Yufen Yue5b521e2019-06-14 15:41:07 -07008691 * commence
Linus Torvalds1da177e2005-04-16 15:20:36 -07008692 * other == active in resync - this many blocks
8693 *
8694 * Before starting a resync we must have set curr_resync to
8695 * 2, and then checked that every "conflicting" array has curr_resync
8696 * less than ours. When we find one that is the same or higher
8697 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
8698 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
8699 * This will mean we have to start checking from the beginning again.
8700 *
8701 */
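	/*
	 * e.g. (addresses arbitrary): if mddevA < mddevB and both reach
	 * curr_resync == 2, A drops to 1 and sleeps on resync_wait while
	 * B proceeds; when B finishes, A restarts the conflict scan at
	 * 'try_again'.
	 */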
8702
8703 do {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008704 int mddev2_minor = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008705 mddev->curr_resync = 2;
8706
8707 try_again:
NeilBrown404e4b42009-12-30 15:25:23 +11008708 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
Linus Torvalds1da177e2005-04-16 15:20:36 -07008709 goto skip;
NeilBrown29ac4aa2008-02-06 01:39:58 -08008710 for_each_mddev(mddev2, tmp) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008711 if (mddev2 == mddev)
8712 continue;
Bernd Schubert90b08712008-05-23 13:04:38 -07008713 if (!mddev->parallel_resync
8714 && mddev2->curr_resync
8715 && match_mddev_units(mddev, mddev2)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008716 DEFINE_WAIT(wq);
8717 if (mddev < mddev2 && mddev->curr_resync == 2) {
8718 /* arbitrarily yield */
8719 mddev->curr_resync = 1;
8720 wake_up(&resync_wait);
8721 }
8722 if (mddev > mddev2 && mddev->curr_resync == 1)
8723 /* no need to wait here, we can wait the next
8724 * time 'round when curr_resync == 2
8725 */
8726 continue;
NeilBrown97441972008-09-19 11:49:54 +10008727 /* We need to wait 'interruptible' so as not to
8728 * contribute to the load average, and not to
8729 * be caught by 'softlockup'
8730 */
8731 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
NeilBrownc91abf52013-11-19 12:02:01 +11008732 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown8712e552005-10-26 01:58:58 -07008733 mddev2->curr_resync >= mddev->curr_resync) {
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008734 if (mddev2_minor != mddev2->md_minor) {
8735 mddev2_minor = mddev2->md_minor;
NeilBrown9d487392016-11-02 14:16:49 +11008736 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8737 desc, mdname(mddev),
8738 mdname(mddev2));
Artur Paszkiewiczc622ca52016-08-16 14:26:08 +02008739 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008740 mddev_put(mddev2);
NeilBrown97441972008-09-19 11:49:54 +10008741 if (signal_pending(current))
8742 flush_signals(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008743 schedule();
8744 finish_wait(&resync_wait, &wq);
8745 goto try_again;
8746 }
8747 finish_wait(&resync_wait, &wq);
8748 }
8749 }
8750 } while (mddev->curr_resync < 2);
8751
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008752 j = 0;
NeilBrown9d888832005-11-08 21:39:26 -08008753 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008754 /* resync follows the size requested by the personality,
NeilBrown57afd892005-06-21 17:17:13 -07008755 * which defaults to physical size, but can be virtual size
Linus Torvalds1da177e2005-04-16 15:20:36 -07008756 */
8757 max_sectors = mddev->resync_max_sectors;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11008758 atomic64_set(&mddev->resync_mismatches, 0);
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008759 /* we don't use the checkpoint if there's a bitmap */
Neil Brown5e96ee62008-06-28 08:31:24 +10008760 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8761 j = mddev->resync_min;
8762 else if (!mddev->bitmap)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008763 j = mddev->recovery_cp;
Neil Brown5e96ee62008-06-28 08:31:24 +10008764
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008765 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
NeilBrownc804cde2012-05-21 09:28:33 +10008766 max_sectors = mddev->resync_max_sectors;
Guoqing Jiangcb9ee152018-10-18 16:37:47 +08008767 /*
8768 * If the original node aborts reshaping then we continue the
8769 * reshaping, so set j again to avoid restart reshape from the
8770 * first beginning
8771 */
8772 if (mddev_is_clustered(mddev) &&
8773 mddev->reshape_position != MaxSector)
8774 j = mddev->reshape_position;
8775 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008776 /* recovery follows the physical size of devices */
Andre Noll58c0fed2009-03-31 14:33:13 +11008777 max_sectors = mddev->dev_sectors;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008778 j = MaxSector;
Dan Williams4e59ca72009-12-12 21:17:06 -07008779 rcu_read_lock();
NeilBrowndafb20f2012-03-19 12:46:39 +11008780 rdev_for_each_rcu(rdev, mddev)
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008781 if (rdev->raid_disk >= 0 &&
Shaohua Lif2076e72015-10-08 21:54:12 -07008782 !test_bit(Journal, &rdev->flags) &&
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008783 !test_bit(Faulty, &rdev->flags) &&
8784 !test_bit(In_sync, &rdev->flags) &&
8785 rdev->recovery_offset < j)
8786 j = rdev->recovery_offset;
Dan Williams4e59ca72009-12-12 21:17:06 -07008787 rcu_read_unlock();
NeilBrown133d4522014-07-02 12:04:14 +10008788
8789 /* If there is a bitmap, we need to make sure all
8790 * writes that started before we added a spare
8791 * complete before we start doing a recovery.
8792 * Otherwise the write might complete and (via
8793 * bitmap_endwrite) set a bit in the bitmap after the
8794 * recovery has checked that bit and skipped that
8795 * region.
8796 */
8797 if (mddev->bitmap) {
8798 mddev->pers->quiesce(mddev, 1);
8799 mddev->pers->quiesce(mddev, 0);
8800 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008801 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008802
NeilBrown9d487392016-11-02 14:16:49 +11008803 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8804 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
8805 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8806 speed_max(mddev), desc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008807
NeilBrowneea1bf32009-03-31 14:27:02 +11008808 is_mddev_idle(mddev, 1); /* this initializes IO event counters */
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008809
NeilBrown57afd892005-06-21 17:17:13 -07008810 io_sectors = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008811 for (m = 0; m < SYNC_MARKS; m++) {
8812 mark[m] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008813 mark_cnt[m] = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008814 }
8815 last_mark = 0;
8816 mddev->resync_mark = mark[last_mark];
8817 mddev->resync_mark_cnt = mark_cnt[last_mark];
8818
8819 /*
8820 * Tune reconstruction:
8821 */
Yufen Yue5b521e2019-06-14 15:41:07 -07008822 window = 32 * (PAGE_SIZE / 512);
NeilBrown9d487392016-11-02 14:16:49 +11008823 pr_debug("md: using %dk window, over a total of %lluk.\n",
8824 window/2, (unsigned long long)max_sectors/2);
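		/*
		 * With 4K pages (PAGE_SIZE is arch-dependent, so this is
		 * only an example) window = 32 * 8 = 256 sectors, i.e. the
		 * "128k" the pr_debug above reports; the speed checks
		 * further down run at most once per window of completed IO.
		 */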
Linus Torvalds1da177e2005-04-16 15:20:36 -07008825
8826 atomic_set(&mddev->recovery_active, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008827 last_check = 0;
8828
8829 if (j>2) {
NeilBrown9d487392016-11-02 14:16:49 +11008830 pr_debug("md: resuming %s of %s from checkpoint.\n",
8831 desc, mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008832 mddev->curr_resync = j;
NeilBrown72f36d52012-10-11 14:25:57 +11008833 } else
8834 mddev->curr_resync = 3; /* no longer delayed */
NeilBrown75d3da42011-01-14 09:14:34 +11008835 mddev->curr_resync_completed = j;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008836 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown72f36d52012-10-11 14:25:57 +11008837 md_new_event(mddev);
majianpeng54f89342012-10-31 11:59:10 +11008838 update_time = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008839
majianpeng7c2c57c2012-07-03 12:12:26 +10008840 blk_start_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008841 while (j < max_sectors) {
NeilBrown57afd892005-06-21 17:17:13 -07008842 sector_t sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008843
NeilBrown57afd892005-06-21 17:17:13 -07008844 skipped = 0;
NeilBrown97e4f422009-03-31 14:33:13 +11008845
NeilBrown7a91ee12009-05-26 12:57:21 +10008846 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8847 ((mddev->curr_resync > mddev->curr_resync_completed &&
8848 (mddev->curr_resync - mddev->curr_resync_completed)
8849 > (max_sectors >> 4)) ||
majianpeng54f89342012-10-31 11:59:10 +11008850 time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
NeilBrown7a91ee12009-05-26 12:57:21 +10008851 (j - mddev->curr_resync_completed)*2
NeilBrownc5e19d92015-07-17 12:06:02 +10008852 >= mddev->resync_max - mddev->curr_resync_completed ||
8853 mddev->curr_resync_completed > mddev->resync_max
NeilBrown7a91ee12009-05-26 12:57:21 +10008854 )) {
NeilBrown97e4f422009-03-31 14:33:13 +11008855 /* time to update curr_resync_completed */
NeilBrown97e4f422009-03-31 14:33:13 +11008856 wait_event(mddev->recovery_wait,
8857 atomic_read(&mddev->recovery_active) == 0);
NeilBrown75d3da42011-01-14 09:14:34 +11008858 mddev->curr_resync_completed = j;
kernelmail35d78c62012-10-31 11:59:10 +11008859 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8860 j > mddev->recovery_cp)
8861 mddev->recovery_cp = j;
majianpeng54f89342012-10-31 11:59:10 +11008862 update_time = jiffies;
Shaohua Li29530792016-12-08 15:48:19 -08008863 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
Junxiao Bie1a86db2020-07-14 16:10:26 -07008864 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown97e4f422009-03-31 14:33:13 +11008865 }
NeilBrownacb180b2009-04-14 16:28:34 +10008866
NeilBrownc91abf52013-11-19 12:02:01 +11008867 while (j >= mddev->resync_max &&
8868 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
NeilBrowne62e58a2009-07-01 13:15:35 +10008869 /* As this condition is controlled by user-space,
8870 * we can block indefinitely, so use '_interruptible'
8871 * to avoid triggering warnings.
8872 */
8873 flush_signals(current); /* just in case */
8874 wait_event_interruptible(mddev->recovery_wait,
8875 mddev->resync_max > j
NeilBrownc91abf52013-11-19 12:02:01 +11008876 || test_bit(MD_RECOVERY_INTR,
8877 &mddev->recovery));
NeilBrowne62e58a2009-07-01 13:15:35 +10008878 }
NeilBrownacb180b2009-04-14 16:28:34 +10008879
NeilBrownc91abf52013-11-19 12:02:01 +11008880 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8881 break;
NeilBrownacb180b2009-04-14 16:28:34 +10008882
NeilBrown09314792015-02-19 16:04:40 +11008883 sectors = mddev->pers->sync_request(mddev, j, &skipped);
NeilBrown57afd892005-06-21 17:17:13 -07008884 if (sectors == 0) {
NeilBrowndfc70642008-05-23 13:04:39 -07008885 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
NeilBrownc91abf52013-11-19 12:02:01 +11008886 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008887 }
NeilBrown57afd892005-06-21 17:17:13 -07008888
8889 if (!skipped) { /* actual IO requested */
8890 io_sectors += sectors;
8891 atomic_add(sectors, &mddev->recovery_active);
8892 }
8893
NeilBrowne875ece2011-07-28 11:39:24 +10008894 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8895 break;
8896
Linus Torvalds1da177e2005-04-16 15:20:36 -07008897 j += sectors;
NeilBrown5ed1df22015-07-24 13:27:08 +10008898 if (j > max_sectors)
8899 /* when skipping, extra large numbers can be returned. */
8900 j = max_sectors;
NeilBrown72f36d52012-10-11 14:25:57 +11008901 if (j > 2)
8902 mddev->curr_resync = j;
NeilBrownff4e8d92006-07-10 04:44:16 -07008903 mddev->curr_mark_cnt = io_sectors;
NeilBrownd7603b72006-01-06 00:20:30 -08008904 if (last_check == 0)
NeilBrowne875ece2011-07-28 11:39:24 +10008905 /* this is the earliest that rebuild will be
NeilBrownd7603b72006-01-06 00:20:30 -08008906 * visible in /proc/mdstat
8907 */
8908 md_new_event(mddev);
NeilBrown57afd892005-06-21 17:17:13 -07008909
8910 if (last_check + window > io_sectors || j == max_sectors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008911 continue;
8912
NeilBrown57afd892005-06-21 17:17:13 -07008913 last_check = io_sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008914 repeat:
8915 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8916 /* step marks */
8917 int next = (last_mark+1) % SYNC_MARKS;
8918
8919 mddev->resync_mark = mark[next];
8920 mddev->resync_mark_cnt = mark_cnt[next];
8921 mark[next] = jiffies;
NeilBrown57afd892005-06-21 17:17:13 -07008922 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008923 last_mark = next;
8924 }
8925
NeilBrownc91abf52013-11-19 12:02:01 +11008926 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8927 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008928
8929 /*
8930 * this loop exits only when either we are slower than
8931 * the 'hard' speed limit, or the system was IO-idle for
8932 * a jiffy.
8933 * the system might be non-idle CPU-wise, but we only care
8934 * about not overloading the IO subsystem. (things like an
8935 * e2fsck being done on the RAID array should execute fast)
8936 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07008937 cond_resched();
8938
Xiao Niac7e50a2014-08-07 09:37:41 -04008939 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8940 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
NeilBrown57afd892005-06-21 17:17:13 -07008941 /((jiffies-mddev->resync_mark)/HZ +1) +1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008942
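		/*
		 * Hypothetical numbers: 409600 sectors done since a mark
		 * taken 10s ago gives
		 *	409600/2 / (10 + 1) + 1 = 18619 KiB/s,
		 * which the tests below compare against speed_min() and
		 * speed_max().
		 */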
NeilBrown88202a02006-01-06 00:21:36 -08008943 if (currspeed > speed_min(mddev)) {
NeilBrownac8fa412015-02-19 16:55:00 +11008944 if (currspeed > speed_max(mddev)) {
NeilBrownc0e48522005-11-18 01:11:01 -08008945 msleep(500);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008946 goto repeat;
8947 }
NeilBrownac8fa412015-02-19 16:55:00 +11008948 if (!is_mddev_idle(mddev, 0)) {
8949 /*
8950 * Give other IO more of a chance.
8951 * The faster the devices, the less we wait.
8952 */
8953 wait_event(mddev->recovery_wait,
8954 !atomic_read(&mddev->recovery_active));
8955 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008956 }
8957 }
NeilBrown9d487392016-11-02 14:16:49 +11008958 pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8959 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8960 ? "interrupted" : "done");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008961 /*
8962 * this also signals 'finished resyncing' to md_stop
8963 */
majianpeng7c2c57c2012-07-03 12:12:26 +10008964 blk_finish_plug(&plug);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008965 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8966
NeilBrown5ed1df22015-07-24 13:27:08 +10008967 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8968 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008969 mddev->curr_resync > 3) {
NeilBrown5ed1df22015-07-24 13:27:08 +10008970 mddev->curr_resync_completed = mddev->curr_resync;
Junxiao Bie1a86db2020-07-14 16:10:26 -07008971 sysfs_notify_dirent_safe(mddev->sysfs_completed);
NeilBrown5ed1df22015-07-24 13:27:08 +10008972 }
NeilBrown09314792015-02-19 16:04:40 +11008973 mddev->pers->sync_request(mddev, max_sectors, &skipped);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008974
NeilBrowndfc70642008-05-23 13:04:39 -07008975 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
NeilBrown1217e1d2016-10-28 15:59:41 +11008976 mddev->curr_resync > 3) {
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008977 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8978 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8979 if (mddev->curr_resync >= mddev->recovery_cp) {
NeilBrown9d487392016-11-02 14:16:49 +11008980 pr_debug("md: checkpointing %s of %s.\n",
8981 desc, mdname(mddev));
majianpeng0a19caa2012-11-19 19:57:34 +08008982 if (test_bit(MD_RECOVERY_ERROR,
8983 &mddev->recovery))
8984 mddev->recovery_cp =
8985 mddev->curr_resync_completed;
8986 else
8987 mddev->recovery_cp =
8988 mddev->curr_resync;
NeilBrown5fd6c1d2006-06-26 00:27:40 -07008989 }
8990 } else
8991 mddev->recovery_cp = MaxSector;
8992 } else {
8993 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8994 mddev->curr_resync = MaxSector;
NeilBrowndb0505d2017-10-17 16:18:36 +11008995 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8996 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
8997 rcu_read_lock();
8998 rdev_for_each_rcu(rdev, mddev)
8999 if (rdev->raid_disk >= 0 &&
9000 mddev->delta_disks >= 0 &&
9001 !test_bit(Journal, &rdev->flags) &&
9002 !test_bit(Faulty, &rdev->flags) &&
9003 !test_bit(In_sync, &rdev->flags) &&
9004 rdev->recovery_offset < mddev->curr_resync)
9005 rdev->recovery_offset = mddev->curr_resync;
9006 rcu_read_unlock();
9007 }
NeilBrown5fd6c1d2006-06-26 00:27:40 -07009008 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009009 }
NeilBrowndb91ff52012-02-07 12:01:51 +11009010 skip:
Guoqing Jiangbb8bf152016-06-02 23:32:04 -04009011 /* set CHANGE_PENDING here since maybe another update is needed,
9012 * so other nodes are informed. It should be harmless for normal
9013 * raid */
Shaohua Li29530792016-12-08 15:48:19 -08009014 set_mask_bits(&mddev->sb_flags, 0,
9015 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009016
BingJing Chang88763912018-02-22 13:34:46 +08009017 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9018 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9019 mddev->delta_disks > 0 &&
9020 mddev->pers->finish_reshape &&
9021 mddev->pers->size &&
9022 mddev->queue) {
9023 mddev_lock_nointr(mddev);
9024 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9025 mddev_unlock(mddev);
Christoph Hellwig2c247c52020-11-16 15:57:11 +01009026 if (!mddev_is_clustered(mddev))
9027 set_capacity_and_notify(mddev->gendisk,
9028 mddev->array_sectors);
BingJing Chang88763912018-02-22 13:34:46 +08009029 }
9030
NeilBrown23da4222014-12-15 12:57:01 +11009031 spin_lock(&mddev->lock);
NeilBrownc07b70a2009-12-14 12:49:48 +11009032 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9033 /* We completed so min/max setting can be forgotten if used. */
9034 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9035 mddev->resync_min = 0;
9036 mddev->resync_max = MaxSector;
9037 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9038 mddev->resync_min = mddev->curr_resync_completed;
NeilBrownf7851be2015-07-02 17:12:58 +10009039 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009040 mddev->curr_resync = 0;
NeilBrown23da4222014-12-15 12:57:01 +11009041 spin_unlock(&mddev->lock);
9042
Linus Torvalds1da177e2005-04-16 15:20:36 -07009043 wake_up(&resync_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009044 md_wakeup_thread(mddev->thread);
NeilBrownc6207272008-02-06 01:39:52 -08009045 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009046}
NeilBrown29269552006-03-27 01:18:10 -08009047EXPORT_SYMBOL_GPL(md_do_sync);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009048
NeilBrown746d3202013-04-24 11:42:41 +10009049static int remove_and_add_spares(struct mddev *mddev,
9050 struct md_rdev *this)
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009051{
NeilBrown3cb03002011-10-11 16:45:26 +11009052 struct md_rdev *rdev;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009053 int spares = 0;
NeilBrownf2a371c2012-01-09 00:46:41 +11009054 int removed = 0;
NeilBrownd787be42016-06-02 16:19:53 +10009055 bool remove_some = false;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009056
NeilBrown39772f02018-02-03 09:19:30 +11009057 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9058 /* Mustn't remove devices when resync thread is running */
9059 return 0;
9060
NeilBrownd787be42016-06-02 16:19:53 +10009061 rdev_for_each(rdev, mddev) {
NeilBrown746d3202013-04-24 11:42:41 +10009062 if ((this == NULL || rdev == this) &&
9063 rdev->raid_disk >= 0 &&
Dan Williams6bfe0b42008-04-30 00:52:32 -07009064 !test_bit(Blocked, &rdev->flags) &&
NeilBrownd787be42016-06-02 16:19:53 +10009065 test_bit(Faulty, &rdev->flags) &&
9066 atomic_read(&rdev->nr_pending)==0) {
9067 /* Faulty non-Blocked devices with nr_pending == 0
9068 * never get nr_pending incremented,
9069 * never get Faulty cleared, and never get Blocked set.
9070 * So we can synchronize_rcu now rather than once per device
9071 */
9072 remove_some = true;
9073 set_bit(RemoveSynchronized, &rdev->flags);
9074 }
9075 }
9076
9077 if (remove_some)
9078 synchronize_rcu();
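	/*
	 * e.g. with eight Faulty devices this costs one RCU grace period
	 * rather than eight; the second pass below does the removal.
	 */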
9079 rdev_for_each(rdev, mddev) {
9080 if ((this == NULL || rdev == this) &&
9081 rdev->raid_disk >= 0 &&
9082 !test_bit(Blocked, &rdev->flags) &&
9083 ((test_bit(RemoveSynchronized, &rdev->flags) ||
Shaohua Lif2076e72015-10-08 21:54:12 -07009084 (!test_bit(In_sync, &rdev->flags) &&
9085 !test_bit(Journal, &rdev->flags))) &&
NeilBrownd787be42016-06-02 16:19:53 +10009086 atomic_read(&rdev->nr_pending)==0)) {
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009087 if (mddev->pers->hot_remove_disk(
NeilBrownb8321b62011-12-23 10:17:51 +11009088 mddev, rdev) == 0) {
Namhyung Kim36fad852011-07-27 11:00:36 +10009089 sysfs_unlink_rdev(mddev, rdev);
NeilBrown011abdc2018-04-26 14:46:29 +10009090 rdev->saved_raid_disk = rdev->raid_disk;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009091 rdev->raid_disk = -1;
NeilBrownf2a371c2012-01-09 00:46:41 +11009092 removed++;
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009093 }
9094 }
NeilBrownd787be42016-06-02 16:19:53 +10009095 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9096 clear_bit(RemoveSynchronized, &rdev->flags);
9097 }
9098
Jonathan Brassow90584fc2013-03-07 16:24:26 -06009099 if (removed && mddev->kobj.sd)
Junxiao Bie1a86db2020-07-14 16:10:26 -07009100 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009101
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009102 if (this && removed)
NeilBrown746d3202013-04-24 11:42:41 +10009103 goto no_add;
9104
NeilBrowndafb20f2012-03-19 12:46:39 +11009105 rdev_for_each(rdev, mddev) {
Goldwyn Rodrigues2910ff12015-09-28 10:27:26 -05009106 if (this && this != rdev)
9107 continue;
Goldwyn Rodriguesdbb64f82015-10-01 13:20:27 -05009108 if (test_bit(Candidate, &rdev->flags))
9109 continue;
NeilBrown7bfec5f2011-12-23 10:17:53 +11009110 if (rdev->raid_disk >= 0 &&
9111 !test_bit(In_sync, &rdev->flags) &&
Shaohua Lif2076e72015-10-08 21:54:12 -07009112 !test_bit(Journal, &rdev->flags) &&
NeilBrown7bfec5f2011-12-23 10:17:53 +11009113 !test_bit(Faulty, &rdev->flags))
9114 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009115 if (rdev->raid_disk >= 0)
9116 continue;
9117 if (test_bit(Faulty, &rdev->flags))
9118 continue;
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009119 if (!test_bit(Journal, &rdev->flags)) {
9120 if (mddev->ro &&
9121 ! (rdev->saved_raid_disk >= 0 &&
9122 !test_bit(Bitmap_sync, &rdev->flags)))
9123 continue;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009124
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009125 rdev->recovery_offset = 0;
9126 }
Guoqing Jiang3f79cc22020-04-04 23:57:11 +02009127 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
Damien Le Moal5e3b8a82020-07-16 13:54:40 +09009128 /* failure here is OK */
9129 sysfs_link_rdev(mddev, rdev);
Shaohua Lif6b6ec52015-12-21 10:51:02 +11009130 if (!test_bit(Journal, &rdev->flags))
9131 spares++;
NeilBrown7ceb17e2013-04-24 11:42:42 +10009132 md_new_event(mddev);
Shaohua Li29530792016-12-08 15:48:19 -08009133 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrowndfc70642008-05-23 13:04:39 -07009134 }
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009135 }
NeilBrown746d3202013-04-24 11:42:41 +10009136no_add:
NeilBrown6dafab62012-09-19 12:54:22 +10009137 if (removed)
Shaohua Li29530792016-12-08 15:48:19 -08009138 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrownb4c4c7b2007-02-28 20:11:48 -08009139 return spares;
9140}
NeilBrown7ebc0be2011-01-14 09:14:33 +11009141
NeilBrownac05f252014-09-30 08:10:42 +10009142static void md_start_sync(struct work_struct *ws)
9143{
9144 struct mddev *mddev = container_of(ws, struct mddev, del_work);
Goldwyn Rodriguesc186b122015-09-30 13:20:35 -05009145
NeilBrownac05f252014-09-30 08:10:42 +10009146 mddev->sync_thread = md_register_thread(md_do_sync,
9147 mddev,
9148 "resync");
9149 if (!mddev->sync_thread) {
NeilBrown9d487392016-11-02 14:16:49 +11009150 pr_warn("%s: could not start resync thread...\n",
9151 mdname(mddev));
NeilBrownac05f252014-09-30 08:10:42 +10009152 /* leave the spares where they are, it shouldn't hurt */
9153 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9154 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9155 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9156 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9157 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
NeilBrownf851b602014-12-11 10:02:10 +11009158 wake_up(&resync_wait);
NeilBrownac05f252014-09-30 08:10:42 +10009159 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9160 &mddev->recovery))
9161 if (mddev->sysfs_action)
9162 sysfs_notify_dirent_safe(mddev->sysfs_action);
9163 } else
9164 md_wakeup_thread(mddev->sync_thread);
9165 sysfs_notify_dirent_safe(mddev->sysfs_action);
9166 md_new_event(mddev);
9167}
9168
Linus Torvalds1da177e2005-04-16 15:20:36 -07009169/*
9170 * This routine is regularly called by all per-raid-array threads to
9171 * deal with generic issues like resync and super-block update.
9172 * Raid personalities that don't have a thread (linear/raid0) do not
9173 * need this as they never do any recovery or update the superblock.
9174 *
9175 * It does not do any resync itself, but rather "forks" off other threads
9176 * to do that as needed.
9177 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9178 * "->recovery" and create a thread at ->sync_thread.
NeilBrowndfc70642008-05-23 13:04:39 -07009179 * When the thread finishes it sets MD_RECOVERY_DONE
Linus Torvalds1da177e2005-04-16 15:20:36 -07009180 * and wakes up this thread, which will reap the thread and finish up.
9181 * This thread also removes any faulty devices (with nr_pending == 0).
9182 *
9183 * The overall approach is:
9184 * 1/ if the superblock needs updating, update it.
9185 * 2/ If a recovery thread is running, don't do anything else.
9186 * 3/ If recovery has finished, clean up, possibly marking spares active.
9187 * 4/ If there are any faulty devices, remove them.
9188 * 5/ If array is degraded, try to add spare devices
9189 * 6/ If array has spares or is not in-sync, start a resync thread.
9190 */
NeilBrownfd01b882011-10-11 16:47:53 +11009191void md_check_recovery(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009192{
NeilBrown059421e2018-10-03 15:04:41 +10009193 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9194 /* Write superblock - thread that called mddev_suspend()
9195 * holds reconfig_mutex for us.
9196 */
9197 set_bit(MD_UPDATING_SB, &mddev->flags);
9198 smp_mb__after_atomic();
9199 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9200 md_update_sb(mddev, 0);
9201 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9202 wake_up(&mddev->sb_wait);
9203 }
9204
Jonathan Brassow68866e42011-06-08 15:10:08 +10009205 if (mddev->suspended)
9206 return;
9207
NeilBrown5f404022005-06-21 17:17:16 -07009208 if (mddev->bitmap)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07009209 md_bitmap_daemon_work(mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009210
NeilBrownfca4d842005-06-21 17:17:11 -07009211 if (signal_pending(current)) {
NeilBrown31a59e32008-04-30 00:52:30 -07009212 if (mddev->pers->sync_request && !mddev->external) {
NeilBrown9d487392016-11-02 14:16:49 +11009213 pr_debug("md: %s in immediate safe mode\n",
9214 mdname(mddev));
NeilBrownfca4d842005-06-21 17:17:11 -07009215 mddev->safemode = 2;
9216 }
9217 flush_signals(current);
9218 }
9219
NeilBrownc89a8ee2008-08-05 15:54:13 +10009220 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9221 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009222 if ( ! (
Shaohua Li29530792016-12-08 15:48:19 -08009223 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07009224 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
                (mddev->external == 0 && mddev->safemode == 1) ||
                (mddev->safemode == 2
                 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
                ))
                return;

        if (mddev_trylock(mddev)) {
                int spares = 0;
                bool try_set_sync = mddev->safemode != 0;

                if (!mddev->external && mddev->safemode == 1)
                        mddev->safemode = 0;

                if (mddev->ro) {
                        struct md_rdev *rdev;
                        if (!mddev->external && mddev->in_sync)
                                /* 'Blocked' flag not needed as failed devices
                                 * will be recorded if array switched to read/write.
                                 * Leaving it set will prevent the device
                                 * from being removed.
                                 */
                                rdev_for_each(rdev, mddev)
                                        clear_bit(Blocked, &rdev->flags);
                        /* On a read-only array we can:
                         * - remove failed devices
                         * - add already-in_sync devices if the array itself
                         *   is in-sync.
                         * As we only add devices that are already in-sync,
                         * we can activate the spares immediately.
                         */
                        remove_and_add_spares(mddev, NULL);
                        /* There is no thread, but we need to call
                         * ->spare_active and clear saved_raid_disk
                         */
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        md_reap_sync_thread(mddev);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
                        goto unlock;
                }

                if (mddev_is_clustered(mddev)) {
                        struct md_rdev *rdev;
                        /* Kick the device if another node issued a
                         * remove-disk request for it.
                         */
                        rdev_for_each(rdev, mddev) {
                                if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
                                                rdev->raid_disk < 0)
                                        md_kick_rdev_from_array(rdev);
                        }
                }

                if (try_set_sync && !mddev->external && !mddev->in_sync) {
                        spin_lock(&mddev->lock);
                        set_in_sync(mddev);
                        spin_unlock(&mddev->lock);
                }

                if (mddev->sb_flags)
                        md_update_sb(mddev, 0);

                if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
                    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
                        /* resync/recovery still happening */
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        goto unlock;
                }
                if (mddev->sync_thread) {
                        md_reap_sync_thread(mddev);
                        goto unlock;
                }
                /* Set RUNNING before clearing NEEDED to avoid
                 * any transients in the value of "sync_action".
                 */
                mddev->curr_resync_completed = 0;
                spin_lock(&mddev->lock);
                set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                spin_unlock(&mddev->lock);
                /* Clear some bits that don't mean anything, but
                 * might be left set
                 */
                clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
                clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

                if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
                        goto not_running;
                /* No recovery is running.
                 * Remove any failed drives, then
                 * add spares if possible.
                 * Spares are also removed and re-added, to allow
                 * the personality to fail the re-add.
                 */

                if (mddev->reshape_position != MaxSector) {
                        if (mddev->pers->check_reshape == NULL ||
                            mddev->pers->check_reshape(mddev) != 0)
                                /* Cannot proceed */
                                goto not_running;
                        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if ((spares = remove_and_add_spares(mddev, NULL))) {
                        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                        set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (mddev->recovery_cp < MaxSector) {
                        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
                        /* nothing to be done ... */
                        goto not_running;

                if (mddev->pers->sync_request) {
                        if (spares) {
                                /* We are adding a device or devices to an array
                                 * which has the bitmap stored on all devices.
                                 * So make sure all bitmap pages get written.
                                 */
                                md_bitmap_write_all(mddev->bitmap);
                        }
                        INIT_WORK(&mddev->del_work, md_start_sync);
                        queue_work(md_misc_wq, &mddev->del_work);
                        goto unlock;
                }
        not_running:
                if (!mddev->sync_thread) {
                        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                        wake_up(&resync_wait);
                        if (test_and_clear_bit(MD_RECOVERY_RECOVER,
                                               &mddev->recovery))
                                if (mddev->sysfs_action)
                                        sysfs_notify_dirent_safe(mddev->sysfs_action);
                }
        unlock:
                wake_up(&mddev->sb_wait);
                mddev_unlock(mddev);
        }
}
EXPORT_SYMBOL(md_check_recovery);

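/*
 * Reap a sync thread that has signalled completion.  In outline, the code
 * below: unregisters the thread, activates spares if the action finished
 * successfully, lets the personality finish a reshape, scraps stale
 * saved_raid_disk hints once the array is no longer degraded, writes the
 * superblock, clears the MD_RECOVERY_* state bits, and finally sets
 * MD_RECOVERY_NEEDED so that md_check_recovery() re-evaluates the array.
 */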
void md_reap_sync_thread(struct mddev *mddev)
{
        struct md_rdev *rdev;
        sector_t old_dev_sectors = mddev->dev_sectors;
        bool is_reshaped = false;

        /* resync has finished, collect result */
        md_unregister_thread(&mddev->sync_thread);
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
            mddev->degraded != mddev->raid_disks) {
                /* success...*/
                /* activate any spares */
                if (mddev->pers->spare_active(mddev)) {
                        sysfs_notify_dirent_safe(mddev->sysfs_degraded);
                        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
                }
        }
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            mddev->pers->finish_reshape) {
                mddev->pers->finish_reshape(mddev);
                if (mddev_is_clustered(mddev))
                        is_reshaped = true;
        }

        /* If the array is no longer degraded, any saved_raid_disk
         * information must be scrapped.
         */
        if (!mddev->degraded)
                rdev_for_each(rdev, mddev)
                        rdev->saved_raid_disk = -1;

        md_update_sb(mddev, 1);
        /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
         * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
         * clustered raid */
        if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
                md_cluster_ops->resync_finish(mddev);
        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        /*
         * We call md_cluster_ops->update_size here because sync_size could
         * have been changed by md_update_sb, and MD_RECOVERY_RESHAPE is
         * cleared, so it is time to update the size across the cluster.
         */
        if (mddev_is_clustered(mddev) && is_reshaped
                                      && !test_bit(MD_CLOSING, &mddev->flags))
                md_cluster_ops->update_size(mddev, old_dev_sectors);
        wake_up(&resync_wait);
        /* flag recovery needed just to double check */
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
        md_new_event(mddev);
        if (mddev->event_work.func)
                queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

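/*
 * Wait up to five seconds for @rdev to leave the Blocked/BlockedBadBlocks
 * state, then drop the pending reference the caller took.  A hedged sketch
 * of a typical caller (a hypothetical personality, not code from this
 * file):
 *
 *        atomic_inc(&rdev->nr_pending);
 *        md_wait_for_blocked_rdev(rdev, mddev);  // drops nr_pending again
 */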
void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
        sysfs_notify_dirent_safe(rdev->sysfs_state);
        wait_event_timeout(rdev->blocked_wait,
                           !test_bit(Blocked, &rdev->flags) &&
                           !test_bit(BlockedBadBlocks, &rdev->flags),
                           msecs_to_jiffies(5000));
        rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

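/*
 * Worked example for the data_offset arithmetic in md_finish_reshape()
 * below (illustrative numbers only): if a reshape moved new_data_offset
 * to 1024 sectors on an rdev whose data_offset was 2048, the device gains
 * 2048 - 1024 = 1024 usable sectors, and data_offset is then re-pointed
 * at 1024.
 */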
void md_finish_reshape(struct mddev *mddev)
{
        /* called by personality module when reshape completes. */
        struct md_rdev *rdev;

        rdev_for_each(rdev, mddev) {
                if (rdev->data_offset > rdev->new_data_offset)
                        rdev->sectors += rdev->data_offset - rdev->new_data_offset;
                else
                        rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
                rdev->data_offset = rdev->new_data_offset;
        }
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management */

/* Returns 1 on success, 0 on failure */
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                       int is_new)
{
        struct mddev *mddev = rdev->mddev;
        int rv;
        if (is_new)
                s += rdev->new_data_offset;
        else
                s += rdev->data_offset;
        rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
        if (rv == 0) {
                /* Make sure they get written out promptly */
                if (test_bit(ExternalBbl, &rdev->flags))
                        sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
                sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_mask_bits(&mddev->sb_flags, 0,
                              BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
                md_wakeup_thread(rdev->mddev->thread);
                return 1;
        } else
                return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

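/*
 * Illustrative use of rdev_set_badblocks() above and rdev_clear_badblocks()
 * below (hypothetical caller, not code from this file): on a failed write a
 * personality records the range rather than failing the whole device, and
 * escalates only if recording is impossible; a later successful rewrite of
 * the range clears it again:
 *
 *        if (!rdev_set_badblocks(rdev, sector, sectors, 0))
 *                md_error(rdev->mddev, rdev);
 *        ...
 *        rdev_clear_badblocks(rdev, sector, sectors, 0);
 */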
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                         int is_new)
{
        int rv;
        if (is_new)
                s += rdev->new_data_offset;
        else
                s += rdev->data_offset;
        rv = badblocks_clear(&rdev->badblocks, s, sectors);
        if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
                sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
        return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

static int md_notify_reboot(struct notifier_block *this,
                            unsigned long code, void *x)
{
        struct list_head *tmp;
        struct mddev *mddev;
        int need_delay = 0;

        for_each_mddev(mddev, tmp) {
                if (mddev_trylock(mddev)) {
                        if (mddev->pers)
                                __md_stop_writes(mddev);
                        if (mddev->persistent)
                                mddev->safemode = 2;
                        mddev_unlock(mddev);
                }
                need_delay = 1;
        }
        /*
         * Certain more exotic SCSI devices are known to lose data on
         * too-early system reboots.  While the right place to handle this
         * issue is the individual driver, we do want to have a safe RAID
         * driver ...
         */
        if (need_delay)
                mdelay(1000*1);

        return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
        .notifier_call  = md_notify_reboot,
        .next           = NULL,
        .priority       = INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
        pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

        proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
}

static int __init md_init(void)
{
        int ret = -ENOMEM;

        md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
        if (!md_wq)
                goto err_wq;

        md_misc_wq = alloc_workqueue("md_misc", 0, 0);
        if (!md_misc_wq)
                goto err_misc_wq;

        md_rdev_misc_wq = alloc_workqueue("md_rdev_misc", 0, 0);
        if (!md_rdev_misc_wq)
                goto err_rdev_misc_wq;

        ret = __register_blkdev(MD_MAJOR, "md", md_probe);
        if (ret < 0)
                goto err_md;

        ret = __register_blkdev(0, "mdp", md_probe);
        if (ret < 0)
                goto err_mdp;
        mdp_major = ret;

        register_reboot_notifier(&md_notifier);
        raid_table_header = register_sysctl_table(raid_root_table);

        md_geninit();
        return 0;

err_mdp:
        unregister_blkdev(MD_MAJOR, "md");
err_md:
        destroy_workqueue(md_rdev_misc_wq);
err_rdev_misc_wq:
        destroy_workqueue(md_misc_wq);
err_misc_wq:
        destroy_workqueue(md_wq);
err_wq:
        return ret;
}

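/*
 * md-cluster helper: apply metadata changes that another node has written.
 * The raw role values tested below are v1.x superblock conventions (see
 * md_p.h): 0xffff means spare (MD_DISK_ROLE_SPARE), 0xfffe faulty
 * (MD_DISK_ROLE_FAULTY) and 0xfffd journal (MD_DISK_ROLE_JOURNAL).
 */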
static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
        struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
        struct md_rdev *rdev2;
        int role, ret;
        char b[BDEVNAME_SIZE];

        /*
         * If the size was changed on another node then we need to
         * do the resize as well.
         */
        if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
                ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
                if (ret)
                        pr_info("md-cluster: resize failed\n");
                else
                        md_bitmap_update_sb(mddev->bitmap);
        }

        /* Check for change of roles in the active devices */
        rdev_for_each(rdev2, mddev) {
                if (test_bit(Faulty, &rdev2->flags))
                        continue;

                /* Check if the roles changed */
                role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);

                if (test_bit(Candidate, &rdev2->flags)) {
                        if (role == 0xfffe) {
                                pr_info("md: Removing Candidate device %s because add failed\n",
                                        bdevname(rdev2->bdev, b));
                                md_kick_rdev_from_array(rdev2);
                                continue;
                        }
                        else
                                clear_bit(Candidate, &rdev2->flags);
                }

                if (role != rdev2->raid_disk) {
                        /*
                         * The device got activated, unless a reshape is
                         * happening.
                         */
                        if (rdev2->raid_disk == -1 && role != 0xffff &&
                            !(le32_to_cpu(sb->feature_map) &
                              MD_FEATURE_RESHAPE_ACTIVE)) {
                                rdev2->saved_raid_disk = role;
                                ret = remove_and_add_spares(mddev, rdev2);
                                pr_info("Activated spare: %s\n",
                                        bdevname(rdev2->bdev, b));
                                /* Wake up mddev->thread here, so the array
                                 * can resync with the newly activated disk */
                                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                                md_wakeup_thread(mddev->thread);
                        }
                        /* device faulty
                         * We just want to do the minimum to mark the disk
                         * as faulty. The recovery is performed by the
                         * one who initiated the error.
                         */
                        if ((role == 0xfffe) || (role == 0xfffd)) {
                                md_error(mddev, rdev2);
                                clear_bit(Blocked, &rdev2->flags);
                        }
                }
        }

        if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
                ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
                if (ret)
                        pr_warn("md: updating array disks failed. %d\n", ret);
        }

        /*
         * mddev->delta_disks has already been updated in update_raid_disks,
         * so it is time to check for a reshape.
         */
        if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
            (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                /*
                 * A reshape is happening on the remote node, so we need to
                 * update reshape_position and call start_reshape.
                 */
                mddev->reshape_position = le64_to_cpu(sb->reshape_position);
                if (mddev->pers->update_reshape_pos)
                        mddev->pers->update_reshape_pos(mddev);
                if (mddev->pers->start_reshape)
                        mddev->pers->start_reshape(mddev);
        } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
                   mddev->reshape_position != MaxSector &&
                   !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
                /* The reshape has just finished on another node. */
                mddev->reshape_position = MaxSector;
                if (mddev->pers->update_reshape_pos)
                        mddev->pers->update_reshape_pos(mddev);
        }

        /* Finally set the event to be up to date */
        mddev->events = le64_to_cpu(sb->events);
}

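/*
 * Re-read the superblock of @rdev from disk into a freshly allocated page.
 * If the allocation or the reload fails, the previous page and sb_loaded
 * state are restored so the rdev is left exactly as it was found.
 */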
static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
        int err;
        struct page *swapout = rdev->sb_page;
        struct mdp_superblock_1 *sb;

        /* Store the sb page of the rdev in the swapout temporary
         * variable in case we err in the future
         */
        rdev->sb_page = NULL;
        err = alloc_disk_sb(rdev);
        if (err == 0) {
                ClearPageUptodate(rdev->sb_page);
                rdev->sb_loaded = 0;
                err = super_types[mddev->major_version].
                        load_super(rdev, NULL, mddev->minor_version);
        }
        if (err < 0) {
                pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
                        __func__, __LINE__, rdev->desc_nr, err);
                if (rdev->sb_page)
                        put_page(rdev->sb_page);
                rdev->sb_page = swapout;
                rdev->sb_loaded = 1;
                return err;
        }

        sb = page_address(rdev->sb_page);
        /* Pick up the recovery offset advertised by the other node
         * (only meaningful when MD_FEATURE_RECOVERY_OFFSET is set).
         */
        if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
                rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

        /* The other node finished recovery, call spare_active to set
         * device In_sync and mddev->degraded
         */
        if (rdev->recovery_offset == MaxSector &&
            !test_bit(In_sync, &rdev->flags) &&
            mddev->pers->spare_active(mddev))
                sysfs_notify_dirent_safe(mddev->sysfs_degraded);

        put_page(swapout);
        return 0;
}

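/*
 * Entry point used by md-cluster when another node reports a metadata
 * update (the METADATA_UPDATED message) naming device @nr: reload that
 * rdev's superblock, apply any role/size/reshape changes, then refresh
 * recovery_offset on every working rdev.
 */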
void md_reload_sb(struct mddev *mddev, int nr)
{
        struct md_rdev *rdev;
        int err;

        /* Find the rdev */
        rdev_for_each_rcu(rdev, mddev) {
                if (rdev->desc_nr == nr)
                        break;
        }

        if (!rdev || rdev->desc_nr != nr) {
                pr_warn("%s: %d Could not find rdev with nr %d\n",
                        __func__, __LINE__, nr);
                return;
        }

        err = read_rdev(mddev, rdev);
        if (err < 0)
                return;

        check_sb_changes(mddev, rdev);

        /* Read all rdevs to update recovery_offset */
        rdev_for_each_rcu(rdev, mddev) {
                if (!test_bit(Faulty, &rdev->flags))
                        read_rdev(mddev, rdev);
        }
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
        struct list_head list;
        dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
        struct detected_devices_node *node_detected_dev;

        node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
        if (node_detected_dev) {
                node_detected_dev->dev = dev;
                mutex_lock(&detected_devices_mutex);
                list_add_tail(&node_detected_dev->list, &all_detected_devices);
                mutex_unlock(&detected_devices_mutex);
        }
}

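/*
 * Drain the list built by md_autodetect_dev() above.  Each candidate is
 * imported with super_format 0, super_minor 90 (i.e. in-kernel
 * autodetection only understands the legacy 0.90 metadata) and queued on
 * pending_raid_disks for autorun_devices().
 */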
void md_autostart_arrays(int part)
{
        struct md_rdev *rdev;
        struct detected_devices_node *node_detected_dev;
        dev_t dev;
        int i_scanned, i_passed;

        i_scanned = 0;
        i_passed = 0;

        pr_info("md: Autodetecting RAID arrays.\n");

        mutex_lock(&detected_devices_mutex);
        while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
                i_scanned++;
                node_detected_dev = list_entry(all_detected_devices.next,
                                               struct detected_devices_node, list);
                list_del(&node_detected_dev->list);
                dev = node_detected_dev->dev;
                kfree(node_detected_dev);
                mutex_unlock(&detected_devices_mutex);
                rdev = md_import_device(dev, 0, 90);
                mutex_lock(&detected_devices_mutex);
                if (IS_ERR(rdev))
                        continue;

                if (test_bit(Faulty, &rdev->flags))
                        continue;

                set_bit(AutoDetected, &rdev->flags);
                list_add(&rdev->same_set, &pending_raid_disks);
                i_passed++;
        }
        mutex_unlock(&detected_devices_mutex);

        pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

        autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
        struct mddev *mddev;
        struct list_head *tmp;
        int delay = 1;

        unregister_blkdev(MD_MAJOR, "md");
        unregister_blkdev(mdp_major, "mdp");
        unregister_reboot_notifier(&md_notifier);
        unregister_sysctl_table(raid_table_header);

        /* We cannot unload the modules while some process is
         * waiting for us in select() or poll() - wake them up
         */
        md_unloading = 1;
        while (waitqueue_active(&md_event_waiters)) {
                /* not safe to leave yet */
                wake_up(&md_event_waiters);
                msleep(delay);
                delay += delay;
        }
        remove_proc_entry("mdstat", NULL);

        for_each_mddev(mddev, tmp) {
                export_array(mddev);
                mddev->ctime = 0;
                mddev->hold_active = 0;
                /*
                 * for_each_mddev() will call mddev_put() at the end of each
                 * iteration.  As the mddev is now fully clear, this will
                 * schedule the mddev for destruction by a workqueue, and the
                 * destroy_workqueue() below will wait for that to complete.
                 */
        }
        destroy_workqueue(md_rdev_misc_wq);
        destroy_workqueue(md_misc_wq);
        destroy_workqueue(md_wq);
}

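/*
 * md_init() runs at subsys_initcall time, i.e. before ordinary device and
 * module initcalls; presumably so the md infrastructure is registered
 * before drivers and the boot-time autodetect path need it.
 */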
subsys_initcall(md_init);
module_exit(md_exit)

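/*
 * Module-parameter plumbing for "start_ro".  When start_readonly is 1,
 * newly assembled arrays come up in auto-read-only mode and switch to
 * read-write on the first write request.
 */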
static int get_ro(char *buffer, const struct kernel_param *kp)
{
        return sprintf(buffer, "%d\n", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
        return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);

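/*
 * Illustrative usage (assuming the module is named md_mod, as in-tree):
 * "modprobe md_mod start_ro=1", or "md_mod.start_ro=1" on the kernel
 * command line when built in.
 */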
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);