// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5.c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(Bitmap_sync, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, REQ_OP_READ, 0, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}

static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	/* Iterate the disks of an mddev, using rcu to protect access to the
	 * linked list, and raising the refcount of devices we return to ensure
	 * they don't disappear while in use.
	 * As devices are only added or removed when raid_disk is < 0 and
	 * nr_pending is 0 and In_sync is clear, the entries we return will
	 * still be in the same position on the list when we re-enter
	 * list_for_each_entry_continue_rcu.
	 *
	 * Note that if entered with 'rdev == NULL' to start at the
	 * beginning, we temporarily assign 'rdev' to an address which
	 * isn't really an rdev, but which can be used by
	 * list_for_each_entry_continue_rcu() to find the first entry.
	 */
	rcu_read_lock();
	if (rdev == NULL)
		/* start at the beginning */
		rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
	else {
		/* release the previous rdev and start from there. */
		rdev_dec_pending(rdev, mddev);
	}
	list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* this is a usable device */
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}

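/*
 * Write one page of an internal (superblock-relative) bitmap to every
 * active rdev, after checking that the bitmap region does not run into
 * the data or the metadata on that device.
 */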
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct md_rdev *rdev;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;

restart:
	rdev = NULL;
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;

		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

		if (page->index == store->file_pages-1) {
			int last_page_size = store->bytes & (PAGE_SIZE-1);
			if (last_page_size == 0)
				last_page_size = PAGE_SIZE;
			size = roundup(last_page_size,
				       bdev_logical_block_size(bdev));
		}
		/* Just make sure we aren't corrupting data or
		 * metadata
		 */
		if (mddev->external) {
			/* Bitmap could be anywhere. */
			if (rdev->sb_start + offset + (page->index
						       * (PAGE_SIZE/512))
			    > rdev->data_offset
			    &&
			    rdev->sb_start + offset
			    < (rdev->data_offset + mddev->dev_sectors
			       + (PAGE_SIZE/512)))
				goto bad_alignment;
		} else if (offset < 0) {
			/* DATA  BITMAP METADATA  */
			if (offset
			    + (long)(page->index * (PAGE_SIZE/512))
			    + size/512 > 0)
				/* bitmap runs in to metadata */
				goto bad_alignment;
			if (rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start + offset)
				/* data runs in to bitmap */
				goto bad_alignment;
		} else if (rdev->sb_start < rdev->data_offset) {
			/* METADATA BITMAP DATA */
			if (rdev->sb_start
			    + offset
			    + page->index*(PAGE_SIZE/512) + size/512
			    > rdev->data_offset)
				/* bitmap runs in to data */
				goto bad_alignment;
		} else {
			/* DATA METADATA BITMAP - no problems */
		}
		md_super_write(mddev, rdev,
			       rdev->sb_start + offset
			       + page->index * (PAGE_SIZE/512),
			       size,
			       page);
	}

	if (wait && md_super_wait(mddev) < 0)
		goto restart;
	return 0;

 bad_alignment:
	return -EINVAL;
}

static void md_bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
	struct buffer_head *bh;

	if (bitmap->storage.file == NULL) {
		switch (write_sb_page(bitmap, page, wait)) {
		case -EINVAL:
			set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
		}
	} else {

		bh = page_buffers(page);

		while (bh && bh->b_blocknr) {
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

		if (wait)
			wait_event(bitmap->write_wait,
				   atomic_read(&bitmap->pending_writes)==0);
	}
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}

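/*
 * I/O completion callback for the bitmap buffer_heads: record any error in
 * the bitmap flags and wake up waiters once the last pending write finishes.
 */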
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
	struct bitmap *bitmap = bh->b_private;

	if (!uptodate)
		set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
	if (atomic_dec_and_test(&bitmap->pending_writes))
		wake_up(&bitmap->write_wait);
}

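/*
 * Undo the buffer_head setup done by read_page(): free every buffer_head
 * attached to the page, detach the page private data and drop the page
 * reference.
 */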
static void free_buffers(struct page *page)
{
	struct buffer_head *bh;

	if (!PagePrivate(page))
		return;

	bh = page_buffers(page);
	while (bh) {
		struct buffer_head *next = bh->b_this_page;
		free_buffer_head(bh);
		bh = next;
	}
	detach_page_private(page);
	put_page(page);
}

/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
		     struct bitmap *bitmap,
		     unsigned long count,
		     struct page *page)
{
	int ret = 0;
	struct inode *inode = file_inode(file);
	struct buffer_head *bh;
	sector_t block, blk_cur;

	pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
		 (unsigned long long)index << PAGE_SHIFT);

	bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
	if (!bh) {
		ret = -ENOMEM;
		goto out;
	}
	attach_page_private(page, bh);
	blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
	while (bh) {
		block = blk_cur;

		if (count == 0)
			bh->b_blocknr = 0;
		else {
			ret = bmap(inode, &block);
			if (ret || !block) {
				ret = -EINVAL;
				bh->b_blocknr = 0;
				goto out;
			}

			bh->b_blocknr = block;
			bh->b_bdev = inode->i_sb->s_bdev;
			if (count < (1<<inode->i_blkbits))
				count = 0;
			else
				count -= (1<<inode->i_blkbits);

			bh->b_end_io = end_bitmap_write;
			bh->b_private = bitmap;
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
		}
		blk_cur++;
		bh = bh->b_this_page;
	}
	page->index = index;

	wait_event(bitmap->write_wait,
		   atomic_read(&bitmap->pending_writes)==0);
	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		ret = -EIO;
out:
	if (ret)
		pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
		       (int)PAGE_SIZE,
		       (unsigned long long)index << PAGE_SHIFT,
		       ret);
	return ret;
}

/*
 * bitmap file superblock operations
 */

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}


/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = bitmap->mddev->bitmap_info.offset;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		sector_div(bm_blocks,
			   bitmap->mddev->bitmap_info.chunksize >> 9);
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	/* Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
	err = 0;

out:
	kunmap_atomic(sb);
	/* Assigning chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


out_no_sb:
	if (test_bit(BITMAP_STALE, &bitmap->flags))
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
	if (err) {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

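/*
 * Allocate the in-memory pages that mirror the bitmap file: one page per
 * PAGE_SIZE of bitmap data (plus the superblock page when 'with_super' is
 * set) and the filemap_attr array holding the per-page attribute bits.
 * 'slot_number' offsets the page indexes for clustered arrays.
 */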
static int md_bitmap_storage_alloc(struct bitmap_storage *store,
				   unsigned long chunks, int with_super,
				   int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_page->index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
		store->filemap[pnum]->index = pnum + offset;
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}

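/* free the filemap pages, the attribute array and the bitmap file itself */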
static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
	struct page **map, *sb_page;
	int pages;
	struct file *file;

	file = store->file;
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		md_bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = file_path(bitmap->storage.file,
					     path, PAGE_SIZE);

			pr_warn("%s: kicking failed bitmap file %s from array!\n",
				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

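/*
 * Each filemap page owns four attribute bits in filemap_attr, so the bit
 * for attribute 'attr' of page 'pnum' lives at (pnum << 2) + attr.
 */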
static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}

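/*
 * Clear a bit in the bitmap file.  The page is only marked
 * BITMAP_PAGE_PENDING here; it will be written back later.
 */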
static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

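/* return whether the bit for 'block' is currently set in the bitmap file */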
static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}


/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void md_bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i;
	int dirty, need_write;
	int writing = 0;

	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
			if (!writing) {
				md_bitmap_wait_writes(bitmap);
				if (bitmap->mddev->queue)
					blk_add_trace_msg(bitmap->mddev->queue,
							  "md bitmap_unplug");
			}
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
			write_page(bitmap, bitmap->storage.filemap[i], 0);
			writing = 1;
		}
	}
	if (writing)
		md_bitmap_wait_writes(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(md_bitmap_unplug);

static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
	struct page *page = NULL;
	unsigned long bit_cnt = 0;
	struct file *file;
	unsigned long offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;
	struct bitmap_storage *store = &bitmap->storage;

	chunks = bitmap->counts.chunks;
	file = store->file;

	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
		}
		return 0;
	}

	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	if (outofdate)
		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
		goto err;
	}

	oldindex = ~0L;
	offset = 0;
	if (!bitmap->mddev->bitmap_info.external)
		offset = sizeof(bitmap_super_t);

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			page = store->filemap[index];
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
					index + node_offset, count);

			if (ret)
				goto err;

			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
					goto err;
			}
		}
		paddr = kmap_atomic(page);
NeilBrownb405fe92012-05-22 13:55:15 +10001148 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
NeilBrownea03aff2006-01-06 00:20:34 -08001149 b = test_bit(bit, paddr);
NeilBrownbd926c62005-11-08 21:39:32 -08001150 else
Akinobu Mita6b33aff2011-03-23 16:42:13 -07001151 b = test_bit_le(bit, paddr);
Cong Wangb2f46e62011-11-28 13:25:44 +08001152 kunmap_atomic(paddr);
NeilBrownbd926c62005-11-08 21:39:32 -08001153 if (b) {
NeilBrown32a76272005-06-21 17:17:14 -07001154 /* if the disk bit is set, set the memory bit */
NeilBrown40cffcc2012-05-22 13:55:24 +10001155 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
NeilBrowndb305e52009-05-07 12:49:06 +10001156 >= start);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001157 md_bitmap_set_memory_bits(bitmap,
1158 (sector_t)i << bitmap->counts.chunkshift,
1159 needed);
NeilBrown32a76272005-06-21 17:17:14 -07001160 bit_cnt++;
1161 }
NeilBrown27581e52012-05-22 13:55:08 +10001162 offset = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001163 }
1164
NeilBrownec0cc222016-11-02 14:16:49 +11001165 pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
1166 bmname(bitmap), store->file_pages,
1167 bit_cnt, chunks);
NeilBrown32a76272005-06-21 17:17:14 -07001168
NeilBrown4ad13662007-07-17 04:06:13 -07001169 return 0;
1170
1171 err:
NeilBrownec0cc222016-11-02 14:16:49 +11001172 pr_warn("%s: bitmap initialisation failed: %d\n",
1173 bmname(bitmap), ret);
NeilBrown32a76272005-06-21 17:17:14 -07001174 return ret;
1175}
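/*
 * Illustrative sketch, not part of the driver: the loop above walks every
 * chunk i and only marks it "needed" when the chunk does not end before
 * 'start', using (i + 1) << chunkshift >= start.  A minimal userspace model
 * of that test, with made-up chunkshift/start values (guarded out so it is
 * never built here):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t chunkshift = 7;        /* assumed: 64KiB chunks of 512B sectors */
        uint64_t start = 300;           /* assumed: bits below this are ignored  */
        uint64_t chunks = 8;

        for (uint64_t i = 0; i < chunks; i++) {
                uint64_t first = i << chunkshift;
                uint64_t end = (i + 1) << chunkshift;  /* first sector past the chunk */
                int needed = end >= start;

                printf("chunk %llu: sectors [%llu, %llu) needed=%d\n",
                       (unsigned long long)i, (unsigned long long)first,
                       (unsigned long long)end, needed);
        }
        return 0;
}
#endif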
1176
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001177void md_bitmap_write_all(struct bitmap *bitmap)
NeilBrowna654b9d82005-06-21 17:17:27 -07001178{
1179 /* We don't actually write all bitmap blocks here,
1180 * just flag them as needing to be written
1181 */
NeilBrownec7a3192006-06-26 00:27:45 -07001182 int i;
NeilBrowna654b9d82005-06-21 17:17:27 -07001183
NeilBrown1ec885c2012-05-22 13:55:10 +10001184 if (!bitmap || !bitmap->storage.filemap)
NeilBrownef99bf42012-05-22 13:55:08 +10001185 return;
NeilBrown1ec885c2012-05-22 13:55:10 +10001186 if (bitmap->storage.file)
NeilBrownef99bf42012-05-22 13:55:08 +10001187 /* Only one copy, so nothing needed */
1188 return;
1189
NeilBrown1ec885c2012-05-22 13:55:10 +10001190 for (i = 0; i < bitmap->storage.file_pages; i++)
NeilBrownd1891222012-05-22 13:55:09 +10001191 set_page_attr(bitmap, i,
NeilBrownec7a3192006-06-26 00:27:45 -07001192 BITMAP_PAGE_NEEDWRITE);
NeilBrown2585f3e2011-09-21 15:37:46 +10001193 bitmap->allclean = 0;
NeilBrowna654b9d82005-06-21 17:17:27 -07001194}
1195
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001196static void md_bitmap_count_page(struct bitmap_counts *bitmap,
1197 sector_t offset, int inc)
NeilBrown32a76272005-06-21 17:17:14 -07001198{
NeilBrown61a0d802012-03-19 12:46:41 +11001199 sector_t chunk = offset >> bitmap->chunkshift;
NeilBrown32a76272005-06-21 17:17:14 -07001200 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1201 bitmap->bp[page].count += inc;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001202 md_bitmap_checkfree(bitmap, page);
NeilBrown32a76272005-06-21 17:17:14 -07001203}
NeilBrownbf07bb72012-05-22 13:55:06 +10001204
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001205static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
NeilBrownbf07bb72012-05-22 13:55:06 +10001206{
1207 sector_t chunk = offset >> bitmap->chunkshift;
1208 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1209 struct bitmap_page *bp = &bitmap->bp[page];
1210
1211 if (!bp->pending)
1212 bp->pending = 1;
1213}
1214
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001215static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1216 sector_t offset, sector_t *blocks,
1217 int create);
NeilBrown32a76272005-06-21 17:17:14 -07001218
1219/*
1220 * bitmap daemon -- periodically wakes up to clean bits and flush pages
1221 * out to disk
1222 */
1223
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001224void md_bitmap_daemon_work(struct mddev *mddev)
NeilBrown32a76272005-06-21 17:17:14 -07001225{
NeilBrownaa5cbd12009-12-14 12:49:46 +11001226 struct bitmap *bitmap;
NeilBrownaa3163f2005-06-21 17:17:22 -07001227 unsigned long j;
NeilBrownbf07bb72012-05-22 13:55:06 +10001228 unsigned long nextpage;
NeilBrown57dab0b2010-10-19 10:03:39 +11001229 sector_t blocks;
NeilBrown40cffcc2012-05-22 13:55:24 +10001230 struct bitmap_counts *counts;
NeilBrown32a76272005-06-21 17:17:14 -07001231
NeilBrownaa5cbd12009-12-14 12:49:46 +11001232 /* Use a mutex to guard daemon_work against
1233 * md_bitmap_destroy().
1234 */
NeilBrownc3d97142009-12-14 12:49:52 +11001235 mutex_lock(&mddev->bitmap_info.mutex);
NeilBrownaa5cbd12009-12-14 12:49:46 +11001236 bitmap = mddev->bitmap;
1237 if (bitmap == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001238 mutex_unlock(&mddev->bitmap_info.mutex);
NeilBrown4ad13662007-07-17 04:06:13 -07001239 return;
NeilBrownaa5cbd12009-12-14 12:49:46 +11001240 }
NeilBrown42a04b52009-12-14 12:49:53 +11001241 if (time_before(jiffies, bitmap->daemon_lastrun
NeilBrown2e61ebb2011-12-23 10:17:50 +11001242 + mddev->bitmap_info.daemon_sleep))
NeilBrown7be3dfe2008-03-10 11:43:48 -07001243 goto done;
1244
NeilBrown32a76272005-06-21 17:17:14 -07001245 bitmap->daemon_lastrun = jiffies;
NeilBrown8311c292008-03-04 14:29:30 -08001246 if (bitmap->allclean) {
NeilBrown2e61ebb2011-12-23 10:17:50 +11001247 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrownaa5cbd12009-12-14 12:49:46 +11001248 goto done;
NeilBrown8311c292008-03-04 14:29:30 -08001249 }
1250 bitmap->allclean = 1;
NeilBrown32a76272005-06-21 17:17:14 -07001251
NeilBrown581dbd92016-11-14 16:30:21 +11001252 if (bitmap->mddev->queue)
1253 blk_add_trace_msg(bitmap->mddev->queue,
1254 "md bitmap_daemon_work");
1255
NeilBrownbf07bb72012-05-22 13:55:06 +10001256 /* Any file-page which is PENDING now needs to be written.
1257 * So set NEEDWRITE now, then after we make any last-minute changes
1258 * we will write it.
1259 */
NeilBrown1ec885c2012-05-22 13:55:10 +10001260 for (j = 0; j < bitmap->storage.file_pages; j++)
NeilBrownbdfd1142012-05-22 13:55:22 +10001261 if (test_and_clear_page_attr(bitmap, j,
1262 BITMAP_PAGE_PENDING))
NeilBrownd1891222012-05-22 13:55:09 +10001263 set_page_attr(bitmap, j,
NeilBrownbf07bb72012-05-22 13:55:06 +10001264 BITMAP_PAGE_NEEDWRITE);
NeilBrownbf07bb72012-05-22 13:55:06 +10001265
1266 if (bitmap->need_sync &&
1267 mddev->bitmap_info.external == 0) {
1268 /* Arrange for superblock update as well as
1269 * other changes */
1270 bitmap_super_t *sb;
1271 bitmap->need_sync = 0;
NeilBrown1ec885c2012-05-22 13:55:10 +10001272 if (bitmap->storage.filemap) {
1273 sb = kmap_atomic(bitmap->storage.sb_page);
NeilBrownef99bf42012-05-22 13:55:08 +10001274 sb->events_cleared =
1275 cpu_to_le64(bitmap->events_cleared);
1276 kunmap_atomic(sb);
NeilBrownd1891222012-05-22 13:55:09 +10001277 set_page_attr(bitmap, 0,
NeilBrownef99bf42012-05-22 13:55:08 +10001278 BITMAP_PAGE_NEEDWRITE);
1279 }
NeilBrownbf07bb72012-05-22 13:55:06 +10001280 }
1281 /* Now look at the bitmap counters and if any are '2' or '1',
1282 * decrement and handle accordingly.
1283 */
NeilBrown40cffcc2012-05-22 13:55:24 +10001284 counts = &bitmap->counts;
1285 spin_lock_irq(&counts->lock);
NeilBrownbf07bb72012-05-22 13:55:06 +10001286 nextpage = 0;
NeilBrown40cffcc2012-05-22 13:55:24 +10001287 for (j = 0; j < counts->chunks; j++) {
NeilBrown32a76272005-06-21 17:17:14 -07001288 bitmap_counter_t *bmc;
NeilBrown40cffcc2012-05-22 13:55:24 +10001289 sector_t block = (sector_t)j << counts->chunkshift;
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001290
NeilBrownbf07bb72012-05-22 13:55:06 +10001291 if (j == nextpage) {
1292 nextpage += PAGE_COUNTER_RATIO;
NeilBrown40cffcc2012-05-22 13:55:24 +10001293 if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
NeilBrownbf07bb72012-05-22 13:55:06 +10001294 j |= PAGE_COUNTER_MASK;
NeilBrownaa3163f2005-06-21 17:17:22 -07001295 continue;
1296 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001297 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001298 }
NeilBrown32a76272005-06-21 17:17:14 -07001299
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001300 bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
NeilBrownbf07bb72012-05-22 13:55:06 +10001301 if (!bmc) {
1302 j |= PAGE_COUNTER_MASK;
1303 continue;
1304 }
1305 if (*bmc == 1 && !bitmap->need_sync) {
1306 /* We can clear the bit */
NeilBrownbf07bb72012-05-22 13:55:06 +10001307 *bmc = 0;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001308 md_bitmap_count_page(counts, block, -1);
1309 md_bitmap_file_clear_bit(bitmap, block);
NeilBrownbf07bb72012-05-22 13:55:06 +10001310 } else if (*bmc && *bmc <= 2) {
1311 *bmc = 1;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001312 md_bitmap_set_pending(counts, block);
NeilBrown2585f3e2011-09-21 15:37:46 +10001313 bitmap->allclean = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001314 }
NeilBrown32a76272005-06-21 17:17:14 -07001315 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001316 spin_unlock_irq(&counts->lock);
NeilBrown32a76272005-06-21 17:17:14 -07001317
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001318 md_bitmap_wait_writes(bitmap);
NeilBrownbf07bb72012-05-22 13:55:06 +10001319 /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1320 * DIRTY pages need to be written by bitmap_unplug so it can wait
1321 * for them.
1322 * If we find any DIRTY page we stop there and let bitmap_unplug
1323 * handle all the rest. This is important in the case where
1324 * the first block holds the superblock and it has been updated.
1325 * We mustn't write any other blocks before the superblock.
1326 */
NeilBrown62f82fa2012-05-22 13:55:21 +10001327 for (j = 0;
1328 j < bitmap->storage.file_pages
1329 && !test_bit(BITMAP_STALE, &bitmap->flags);
1330 j++) {
NeilBrownd1891222012-05-22 13:55:09 +10001331 if (test_page_attr(bitmap, j,
NeilBrownbf07bb72012-05-22 13:55:06 +10001332 BITMAP_PAGE_DIRTY))
1333 /* bitmap_unplug will handle the rest */
1334 break;
Zhiqiang Liu55180492019-12-07 11:00:08 +08001335 if (bitmap->storage.filemap &&
1336 test_and_clear_page_attr(bitmap, j,
NeilBrownbdfd1142012-05-22 13:55:22 +10001337 BITMAP_PAGE_NEEDWRITE)) {
NeilBrown1ec885c2012-05-22 13:55:10 +10001338 write_page(bitmap, bitmap->storage.filemap[j], 0);
NeilBrownbf07bb72012-05-22 13:55:06 +10001339 }
1340 }
NeilBrownbf07bb72012-05-22 13:55:06 +10001341
NeilBrown7be3dfe2008-03-10 11:43:48 -07001342 done:
NeilBrown8311c292008-03-04 14:29:30 -08001343 if (bitmap->allclean == 0)
NeilBrown2e61ebb2011-12-23 10:17:50 +11001344 mddev->thread->timeout =
1345 mddev->bitmap_info.daemon_sleep;
NeilBrownc3d97142009-12-14 12:49:52 +11001346 mutex_unlock(&mddev->bitmap_info.mutex);
NeilBrown32a76272005-06-21 17:17:14 -07001347}
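/*
 * Illustrative sketch, not part of the driver: the counter loop above decays
 * idle counters over successive daemon passes - a counter of 2 drops to 1,
 * and a counter of 1 drops to 0, at which point the on-disk bit is cleared.
 * Counters above 2 (writes still in flight) are left alone.  The userspace
 * model below mirrors that decay with a stand-in for the file bit; the
 * need_sync and pending bookkeeping is deliberately omitted:
 */
#if 0
#include <stdio.h>

static int file_bit = 1;        /* stand-in for md_bitmap_file_clear_bit() */

static void daemon_pass(int *bmc)
{
        if (*bmc == 1) {                /* idle for two passes: clear for real */
                *bmc = 0;
                file_bit = 0;
        } else if (*bmc && *bmc <= 2) { /* recently idle: decay one step */
                *bmc = 1;
        }
}

int main(void)
{
        int bmc = 2;                    /* writes finished, bit still set on disk */

        for (int pass = 1; pass <= 3; pass++) {
                daemon_pass(&bmc);
                printf("pass %d: counter=%d file_bit=%d\n", pass, bmc, file_bit);
        }
        return 0;
}
#endif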
1348
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001349static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1350 sector_t offset, sector_t *blocks,
1351 int create)
NeilBrownee305ac2009-09-23 18:06:44 +10001352__releases(bitmap->lock)
1353__acquires(bitmap->lock)
NeilBrown32a76272005-06-21 17:17:14 -07001354{
1355 /* If 'create', we might release the lock and reclaim it.
1356 * The lock must have been taken with interrupts enabled.
1357 * If !create, we don't release the lock.
1358 */
NeilBrown61a0d802012-03-19 12:46:41 +11001359 sector_t chunk = offset >> bitmap->chunkshift;
NeilBrown32a76272005-06-21 17:17:14 -07001360 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1361 unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1362 sector_t csize;
NeilBrownef425672010-06-01 19:37:33 +10001363 int err;
NeilBrown32a76272005-06-21 17:17:14 -07001364
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001365 err = md_bitmap_checkpage(bitmap, page, create, 0);
NeilBrownef425672010-06-01 19:37:33 +10001366
1367 if (bitmap->bp[page].hijacked ||
1368 bitmap->bp[page].map == NULL)
NeilBrown61a0d802012-03-19 12:46:41 +11001369 csize = ((sector_t)1) << (bitmap->chunkshift +
NeilBrownef425672010-06-01 19:37:33 +10001370 PAGE_COUNTER_SHIFT - 1);
1371 else
NeilBrown61a0d802012-03-19 12:46:41 +11001372 csize = ((sector_t)1) << bitmap->chunkshift;
NeilBrownef425672010-06-01 19:37:33 +10001373 *blocks = csize - (offset & (csize - 1));
1374
1375 if (err < 0)
NeilBrown32a76272005-06-21 17:17:14 -07001376 return NULL;
NeilBrownef425672010-06-01 19:37:33 +10001377
NeilBrown32a76272005-06-21 17:17:14 -07001378 /* now locked ... */
1379
1380 if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1381 /* should we use the first or second counter field
1382 * of the hijacked pointer? */
1383 int hi = (pageoff > PAGE_COUNTER_MASK);
NeilBrown32a76272005-06-21 17:17:14 -07001384 return &((bitmap_counter_t *)
1385 &bitmap->bp[page].map)[hi];
NeilBrownef425672010-06-01 19:37:33 +10001386 } else /* page is allocated */
NeilBrown32a76272005-06-21 17:17:14 -07001387 return (bitmap_counter_t *)
1388 &(bitmap->bp[page].map[pageoff]);
NeilBrown32a76272005-06-21 17:17:14 -07001389}
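/*
 * Illustrative sketch, not part of the driver: a chunk index is split into a
 * counter-page index and a byte offset inside that page; when a page is
 * "hijacked" the pointer word itself holds just two counters, one per half of
 * the page's range, selected by whether the offset falls in the upper half.
 * The userspace model below assumes 4KiB pages and 16-bit counters
 * (PAGE_COUNTER_SHIFT == 11); the chunk value is made up:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define COUNTER_BYTE_SHIFT      1                       /* 16-bit counters      */
#define PAGE_COUNTER_SHIFT      11                      /* 2048 counters / page */
#define PAGE_COUNTER_MASK       ((1UL << PAGE_COUNTER_SHIFT) - 1)

int main(void)
{
        uint64_t chunk = 5200;                          /* assumed example */
        uint64_t page = chunk >> PAGE_COUNTER_SHIFT;
        uint64_t pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
        int hi = pageoff > PAGE_COUNTER_MASK;           /* hijacked: which half? */

        printf("chunk %llu -> page %llu, byte offset %llu, hijack slot %d\n",
               (unsigned long long)chunk, (unsigned long long)page,
               (unsigned long long)pageoff, hi);
        return 0;
}
#endif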
1390
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001391int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
NeilBrown32a76272005-06-21 17:17:14 -07001392{
NeilBrownac2f40b2010-06-01 19:37:31 +10001393 if (!bitmap)
1394 return 0;
NeilBrown4b6d2872005-09-09 16:23:47 -07001395
1396 if (behind) {
Paul Clements696fcd52010-03-08 16:02:37 +11001397 int bw;
NeilBrown4b6d2872005-09-09 16:23:47 -07001398 atomic_inc(&bitmap->behind_writes);
Paul Clements696fcd52010-03-08 16:02:37 +11001399 bw = atomic_read(&bitmap->behind_writes);
1400 if (bw > bitmap->behind_writes_used)
1401 bitmap->behind_writes_used = bw;
1402
NeilBrown36a4e1f2011-10-07 14:23:17 +11001403 pr_debug("inc write-behind count %d/%lu\n",
1404 bw, bitmap->mddev->bitmap_info.max_write_behind);
NeilBrown4b6d2872005-09-09 16:23:47 -07001405 }
1406
NeilBrown32a76272005-06-21 17:17:14 -07001407 while (sectors) {
NeilBrown57dab0b2010-10-19 10:03:39 +11001408 sector_t blocks;
NeilBrown32a76272005-06-21 17:17:14 -07001409 bitmap_counter_t *bmc;
1410
NeilBrown40cffcc2012-05-22 13:55:24 +10001411 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001412 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
NeilBrown32a76272005-06-21 17:17:14 -07001413 if (!bmc) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001414 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001415 return 0;
1416 }
1417
Namhyung Kim27d5ea02011-06-09 11:42:57 +10001418 if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
Neil Brownda6e1a32007-02-08 14:20:37 -08001419 DEFINE_WAIT(__wait);
1420 /* note that it is safe to do the prepare_to_wait
1421 * after the test as long as we do it before dropping
1422 * the spinlock.
1423 */
1424 prepare_to_wait(&bitmap->overflow_wait, &__wait,
1425 TASK_UNINTERRUPTIBLE);
NeilBrown40cffcc2012-05-22 13:55:24 +10001426 spin_unlock_irq(&bitmap->counts.lock);
NeilBrownf54a9d02012-08-02 08:33:20 +10001427 schedule();
Neil Brownda6e1a32007-02-08 14:20:37 -08001428 finish_wait(&bitmap->overflow_wait, &__wait);
1429 continue;
1430 }
1431
NeilBrownac2f40b2010-06-01 19:37:31 +10001432 switch (*bmc) {
NeilBrown32a76272005-06-21 17:17:14 -07001433 case 0:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001434 md_bitmap_file_set_bit(bitmap, offset);
1435 md_bitmap_count_page(&bitmap->counts, offset, 1);
NeilBrown32a76272005-06-21 17:17:14 -07001436 /* fall through */
1437 case 1:
1438 *bmc = 2;
1439 }
Neil Brownda6e1a32007-02-08 14:20:37 -08001440
NeilBrown32a76272005-06-21 17:17:14 -07001441 (*bmc)++;
1442
NeilBrown40cffcc2012-05-22 13:55:24 +10001443 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001444
1445 offset += blocks;
1446 if (sectors > blocks)
1447 sectors -= blocks;
NeilBrownac2f40b2010-06-01 19:37:31 +10001448 else
1449 sectors = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001450 }
1451 return 0;
1452}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001453EXPORT_SYMBOL(md_bitmap_startwrite);
NeilBrown32a76272005-06-21 17:17:14 -07001454
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001455void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
1456 unsigned long sectors, int success, int behind)
NeilBrown32a76272005-06-21 17:17:14 -07001457{
NeilBrownac2f40b2010-06-01 19:37:31 +10001458 if (!bitmap)
1459 return;
NeilBrown4b6d2872005-09-09 16:23:47 -07001460 if (behind) {
NeilBrowne5551902010-03-31 11:21:44 +11001461 if (atomic_dec_and_test(&bitmap->behind_writes))
1462 wake_up(&bitmap->behind_wait);
NeilBrown36a4e1f2011-10-07 14:23:17 +11001463 pr_debug("dec write-behind count %d/%lu\n",
1464 atomic_read(&bitmap->behind_writes),
1465 bitmap->mddev->bitmap_info.max_write_behind);
NeilBrown4b6d2872005-09-09 16:23:47 -07001466 }
1467
NeilBrown32a76272005-06-21 17:17:14 -07001468 while (sectors) {
NeilBrown57dab0b2010-10-19 10:03:39 +11001469 sector_t blocks;
NeilBrown32a76272005-06-21 17:17:14 -07001470 unsigned long flags;
1471 bitmap_counter_t *bmc;
1472
NeilBrown40cffcc2012-05-22 13:55:24 +10001473 spin_lock_irqsave(&bitmap->counts.lock, flags);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001474 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
NeilBrown32a76272005-06-21 17:17:14 -07001475 if (!bmc) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001476 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
NeilBrown32a76272005-06-21 17:17:14 -07001477 return;
1478 }
1479
NeilBrown961902c2011-12-23 09:57:48 +11001480 if (success && !bitmap->mddev->degraded &&
Neil Browna0da84f2008-06-28 08:31:22 +10001481 bitmap->events_cleared < bitmap->mddev->events) {
1482 bitmap->events_cleared = bitmap->mddev->events;
1483 bitmap->need_sync = 1;
NeilBrown5ff5aff2010-06-01 19:37:32 +10001484 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
Neil Browna0da84f2008-06-28 08:31:22 +10001485 }
1486
Namhyung Kim27d5ea02011-06-09 11:42:57 +10001487 if (!success && !NEEDED(*bmc))
NeilBrown32a76272005-06-21 17:17:14 -07001488 *bmc |= NEEDED_MASK;
1489
Namhyung Kim27d5ea02011-06-09 11:42:57 +10001490 if (COUNTER(*bmc) == COUNTER_MAX)
Neil Brownda6e1a32007-02-08 14:20:37 -08001491 wake_up(&bitmap->overflow_wait);
1492
NeilBrown32a76272005-06-21 17:17:14 -07001493 (*bmc)--;
NeilBrown2585f3e2011-09-21 15:37:46 +10001494 if (*bmc <= 2) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001495 md_bitmap_set_pending(&bitmap->counts, offset);
NeilBrown2585f3e2011-09-21 15:37:46 +10001496 bitmap->allclean = 0;
1497 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001498 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
NeilBrown32a76272005-06-21 17:17:14 -07001499 offset += blocks;
1500 if (sectors > blocks)
1501 sectors -= blocks;
NeilBrownac2f40b2010-06-01 19:37:31 +10001502 else
1503 sectors = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001504 }
1505}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001506EXPORT_SYMBOL(md_bitmap_endwrite);
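/*
 * Illustrative sketch, not taken from any personality: a caller brackets each
 * data write with these two calls - startwrite before the data is issued (the
 * personality later pushes the dirty bits out with md_bitmap_unplug) and
 * endwrite from the completion path.  The helper name below is hypothetical;
 * only the two md_bitmap_* calls and their signatures come from this file:
 */
#if 0
static void example_write_region(struct mddev *mddev, sector_t sector,
                                 unsigned long sectors, int behind)
{
        /* mark the region dirty before touching the data */
        md_bitmap_startwrite(mddev->bitmap, sector, sectors, behind);

        /* ... submit the data write(s) and wait for them to complete ... */

        /* success == 1 lets the daemon eventually clear the bits again */
        md_bitmap_endwrite(mddev->bitmap, sector, sectors, 1, behind);
}
#endif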
NeilBrown32a76272005-06-21 17:17:14 -07001507
NeilBrown57dab0b2010-10-19 10:03:39 +11001508static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
NeilBrown1187cf02009-03-31 14:27:02 +11001509 int degraded)
NeilBrown32a76272005-06-21 17:17:14 -07001510{
1511 bitmap_counter_t *bmc;
1512 int rv;
1513 if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1514 *blocks = 1024;
1515 return 1; /* always resync if no bitmap */
1516 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001517 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001518 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
NeilBrown32a76272005-06-21 17:17:14 -07001519 rv = 0;
1520 if (bmc) {
1521 /* locked */
1522 if (RESYNC(*bmc))
1523 rv = 1;
1524 else if (NEEDED(*bmc)) {
1525 rv = 1;
NeilBrown6a806c52005-07-15 03:56:35 -07001526 if (!degraded) { /* don't set/clear bits if degraded */
1527 *bmc |= RESYNC_MASK;
1528 *bmc &= ~NEEDED_MASK;
1529 }
NeilBrown32a76272005-06-21 17:17:14 -07001530 }
1531 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001532 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001533 return rv;
1534}
1535
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001536int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1537 int degraded)
NeilBrown1187cf02009-03-31 14:27:02 +11001538{
1539 /* md_bitmap_start_sync must always report on multiples of whole
1540 * pages, otherwise resync (which is very PAGE_SIZE based) will
1541 * get confused.
1542 * So call __bitmap_start_sync repeatedly (if needed) until
1543 * at least PAGE_SIZE>>9 blocks are covered.
1544 * Return the 'or' of the results.
1545 */
1546 int rv = 0;
NeilBrown57dab0b2010-10-19 10:03:39 +11001547 sector_t blocks1;
NeilBrown1187cf02009-03-31 14:27:02 +11001548
1549 *blocks = 0;
1550 while (*blocks < (PAGE_SIZE>>9)) {
1551 rv |= __bitmap_start_sync(bitmap, offset,
1552 &blocks1, degraded);
1553 offset += blocks1;
1554 *blocks += blocks1;
1555 }
1556 return rv;
1557}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001558EXPORT_SYMBOL(md_bitmap_start_sync);
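/*
 * Illustrative sketch, not part of the driver: the wrapper above keeps asking
 * the per-chunk helper how many sectors each answer covers and continues until
 * at least PAGE_SIZE >> 9 sectors (8 with 4KiB pages) are accounted for,
 * OR-ing the individual answers.  The userspace model below uses a fake
 * helper that covers 3 sectors per call; the numbers are made up:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SECTORS_PER_PAGE (4096 >> 9)

/* stand-in for __bitmap_start_sync(): every other extent needs resync */
static int fake_start_sync(uint64_t offset, uint64_t *blocks)
{
        *blocks = 3;
        return (offset / 3) % 2;
}

int main(void)
{
        uint64_t offset = 0, covered = 0, blocks;
        int rv = 0;

        while (covered < SECTORS_PER_PAGE) {
                rv |= fake_start_sync(offset, &blocks);
                offset += blocks;
                covered += blocks;
        }
        printf("covered %llu sectors, combined answer %d\n",
               (unsigned long long)covered, rv);
        return 0;
}
#endif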
NeilBrown1187cf02009-03-31 14:27:02 +11001559
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001560void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
NeilBrown32a76272005-06-21 17:17:14 -07001561{
1562 bitmap_counter_t *bmc;
1563 unsigned long flags;
NeilBrownac2f40b2010-06-01 19:37:31 +10001564
1565 if (bitmap == NULL) {
NeilBrown32a76272005-06-21 17:17:14 -07001566 *blocks = 1024;
1567 return;
1568 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001569 spin_lock_irqsave(&bitmap->counts.lock, flags);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001570 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
NeilBrown32a76272005-06-21 17:17:14 -07001571 if (bmc == NULL)
1572 goto unlock;
1573 /* locked */
NeilBrown32a76272005-06-21 17:17:14 -07001574 if (RESYNC(*bmc)) {
1575 *bmc &= ~RESYNC_MASK;
1576
1577 if (!NEEDED(*bmc) && aborted)
1578 *bmc |= NEEDED_MASK;
1579 else {
NeilBrown2585f3e2011-09-21 15:37:46 +10001580 if (*bmc <= 2) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001581 md_bitmap_set_pending(&bitmap->counts, offset);
NeilBrown2585f3e2011-09-21 15:37:46 +10001582 bitmap->allclean = 0;
1583 }
NeilBrown32a76272005-06-21 17:17:14 -07001584 }
1585 }
1586 unlock:
NeilBrown40cffcc2012-05-22 13:55:24 +10001587 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
NeilBrown32a76272005-06-21 17:17:14 -07001588}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001589EXPORT_SYMBOL(md_bitmap_end_sync);
NeilBrown32a76272005-06-21 17:17:14 -07001590
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001591void md_bitmap_close_sync(struct bitmap *bitmap)
NeilBrown32a76272005-06-21 17:17:14 -07001592{
1593 /* Sync has finished, and any bitmap chunks that weren't synced
1594 * properly have been aborted. It remains to us to clear the
1595 * RESYNC bit wherever it is still on
1596 */
1597 sector_t sector = 0;
NeilBrown57dab0b2010-10-19 10:03:39 +11001598 sector_t blocks;
NeilBrownb47490c2008-02-06 01:39:50 -08001599 if (!bitmap)
1600 return;
NeilBrown32a76272005-06-21 17:17:14 -07001601 while (sector < bitmap->mddev->resync_max_sectors) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001602 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
NeilBrownb47490c2008-02-06 01:39:50 -08001603 sector += blocks;
NeilBrown32a76272005-06-21 17:17:14 -07001604 }
1605}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001606EXPORT_SYMBOL(md_bitmap_close_sync);
NeilBrown32a76272005-06-21 17:17:14 -07001607
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001608void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
NeilBrownb47490c2008-02-06 01:39:50 -08001609{
1610 sector_t s = 0;
NeilBrown57dab0b2010-10-19 10:03:39 +11001611 sector_t blocks;
NeilBrownb47490c2008-02-06 01:39:50 -08001612
1613 if (!bitmap)
1614 return;
1615 if (sector == 0) {
1616 bitmap->last_end_sync = jiffies;
1617 return;
1618 }
Goldwyn Rodriguesc40f3412015-08-19 08:14:42 +10001619 if (!force && time_before(jiffies, (bitmap->last_end_sync
NeilBrown1b04be92009-12-14 12:49:53 +11001620 + bitmap->mddev->bitmap_info.daemon_sleep)))
NeilBrownb47490c2008-02-06 01:39:50 -08001621 return;
1622 wait_event(bitmap->mddev->recovery_wait,
1623 atomic_read(&bitmap->mddev->recovery_active) == 0);
1624
NeilBrown75d3da42011-01-14 09:14:34 +11001625 bitmap->mddev->curr_resync_completed = sector;
Shaohua Li29530792016-12-08 15:48:19 -08001626 set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
NeilBrown40cffcc2012-05-22 13:55:24 +10001627 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
NeilBrownb47490c2008-02-06 01:39:50 -08001628 s = 0;
1629 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001630 md_bitmap_end_sync(bitmap, s, &blocks, 0);
NeilBrownb47490c2008-02-06 01:39:50 -08001631 s += blocks;
1632 }
1633 bitmap->last_end_sync = jiffies;
NeilBrownacb180b2009-04-14 16:28:34 +10001634 sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
NeilBrownb47490c2008-02-06 01:39:50 -08001635}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001636EXPORT_SYMBOL(md_bitmap_cond_end_sync);
NeilBrownb47490c2008-02-06 01:39:50 -08001637
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001638void md_bitmap_sync_with_cluster(struct mddev *mddev,
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001639 sector_t old_lo, sector_t old_hi,
1640 sector_t new_lo, sector_t new_hi)
1641{
1642 struct bitmap *bitmap = mddev->bitmap;
1643 sector_t sector, blocks = 0;
1644
1645 for (sector = old_lo; sector < new_lo; ) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001646 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001647 sector += blocks;
1648 }
1649 WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1650
1651 for (sector = old_hi; sector < new_hi; ) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001652 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001653 sector += blocks;
1654 }
1655 WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1656}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001657EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001658
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001659static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
NeilBrown32a76272005-06-21 17:17:14 -07001660{
1661 /* For each chunk covered by any of these sectors, set the
NeilBrownef99bf42012-05-22 13:55:08 +10001662 * counter to 2 and possibly set resync_needed. They should all
NeilBrown32a76272005-06-21 17:17:14 -07001663 * be 0 at this point
1664 */
NeilBrown193f1c92005-08-04 12:53:33 -07001665
NeilBrown57dab0b2010-10-19 10:03:39 +11001666 sector_t secs;
NeilBrown193f1c92005-08-04 12:53:33 -07001667 bitmap_counter_t *bmc;
NeilBrown40cffcc2012-05-22 13:55:24 +10001668 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001669 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
NeilBrown193f1c92005-08-04 12:53:33 -07001670 if (!bmc) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001671 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown193f1c92005-08-04 12:53:33 -07001672 return;
NeilBrown32a76272005-06-21 17:17:14 -07001673 }
NeilBrownac2f40b2010-06-01 19:37:31 +10001674 if (!*bmc) {
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001675 *bmc = 2;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001676 md_bitmap_count_page(&bitmap->counts, offset, 1);
1677 md_bitmap_set_pending(&bitmap->counts, offset);
NeilBrown2585f3e2011-09-21 15:37:46 +10001678 bitmap->allclean = 0;
NeilBrown193f1c92005-08-04 12:53:33 -07001679 }
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001680 if (needed)
1681 *bmc |= NEEDED_MASK;
NeilBrown40cffcc2012-05-22 13:55:24 +10001682 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001683}
1684
Paul Clements9b1d1da2006-10-03 01:15:49 -07001685/* dirty the memory and file bits for bitmap chunks "s" to "e" */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001686void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
Paul Clements9b1d1da2006-10-03 01:15:49 -07001687{
1688 unsigned long chunk;
1689
1690 for (chunk = s; chunk <= e; chunk++) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001691 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001692 md_bitmap_set_memory_bits(bitmap, sec, 1);
1693 md_bitmap_file_set_bit(bitmap, sec);
NeilBrownffa23322009-12-14 12:49:56 +11001694 if (sec < bitmap->mddev->recovery_cp)
1695 /* We are asserting that the array is dirty,
1696 * so move the recovery_cp address back so
1697 * that it is obvious that it is dirty
1698 */
1699 bitmap->mddev->recovery_cp = sec;
Paul Clements9b1d1da2006-10-03 01:15:49 -07001700 }
1701}
1702
NeilBrown32a76272005-06-21 17:17:14 -07001703/*
NeilBrown6b8b3e82005-08-04 12:53:35 -07001704 * flush out any pending updates
1705 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001706void md_bitmap_flush(struct mddev *mddev)
NeilBrown6b8b3e82005-08-04 12:53:35 -07001707{
1708 struct bitmap *bitmap = mddev->bitmap;
NeilBrown42a04b52009-12-14 12:49:53 +11001709 long sleep;
NeilBrown6b8b3e82005-08-04 12:53:35 -07001710
1711 if (!bitmap) /* there was no bitmap */
1712 return;
1713
1714 /* run the daemon_work three times to ensure everything is flushed
1715 * that can be
1716 */
NeilBrown1b04be92009-12-14 12:49:53 +11001717 sleep = mddev->bitmap_info.daemon_sleep * 2;
NeilBrown42a04b52009-12-14 12:49:53 +11001718 bitmap->daemon_lastrun -= sleep;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001719 md_bitmap_daemon_work(mddev);
NeilBrown42a04b52009-12-14 12:49:53 +11001720 bitmap->daemon_lastrun -= sleep;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001721 md_bitmap_daemon_work(mddev);
NeilBrown42a04b52009-12-14 12:49:53 +11001722 bitmap->daemon_lastrun -= sleep;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001723 md_bitmap_daemon_work(mddev);
1724 md_bitmap_update_sb(bitmap);
NeilBrown6b8b3e82005-08-04 12:53:35 -07001725}
1726
1727/*
NeilBrown32a76272005-06-21 17:17:14 -07001728 * free memory that was allocated
1729 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001730void md_bitmap_free(struct bitmap *bitmap)
NeilBrown32a76272005-06-21 17:17:14 -07001731{
1732 unsigned long k, pages;
1733 struct bitmap_page *bp;
NeilBrown32a76272005-06-21 17:17:14 -07001734
1735 if (!bitmap) /* there was no bitmap */
1736 return;
1737
Guoqing Jiangf9a67b12016-04-01 17:08:49 +08001738 if (bitmap->sysfs_can_clear)
1739 sysfs_put(bitmap->sysfs_can_clear);
1740
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001741 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1742 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
Goldwyn Rodriguesb97e92572014-06-06 11:50:56 -05001743 md_cluster_stop(bitmap->mddev);
1744
NeilBrownfae7d322012-05-22 13:55:21 +10001745 /* Shouldn't be needed - but just in case.... */
1746 wait_event(bitmap->write_wait,
1747 atomic_read(&bitmap->pending_writes) == 0);
1748
1749 /* release the bitmap file */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001750 md_bitmap_file_unmap(&bitmap->storage);
NeilBrown32a76272005-06-21 17:17:14 -07001751
NeilBrown40cffcc2012-05-22 13:55:24 +10001752 bp = bitmap->counts.bp;
1753 pages = bitmap->counts.pages;
NeilBrown32a76272005-06-21 17:17:14 -07001754
1755 /* free all allocated memory */
1756
NeilBrown32a76272005-06-21 17:17:14 -07001757 if (bp) /* deallocate the page memory */
1758 for (k = 0; k < pages; k++)
1759 if (bp[k].map && !bp[k].hijacked)
1760 kfree(bp[k].map);
1761 kfree(bp);
1762 kfree(bitmap);
1763}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001764EXPORT_SYMBOL(md_bitmap_free);
NeilBrownaa5cbd12009-12-14 12:49:46 +11001765
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001766void md_bitmap_wait_behind_writes(struct mddev *mddev)
Guoqing Jiang48df4982017-03-14 09:40:20 +08001767{
1768 struct bitmap *bitmap = mddev->bitmap;
1769
1770 /* wait for behind writes to complete */
1771 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1772 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
1773 mdname(mddev));
1774 /* need to kick something here to make sure I/O goes? */
1775 wait_event(bitmap->behind_wait,
1776 atomic_read(&bitmap->behind_writes) == 0);
1777 }
1778}
1779
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001780void md_bitmap_destroy(struct mddev *mddev)
NeilBrown3178b0d2005-09-09 16:23:50 -07001781{
1782 struct bitmap *bitmap = mddev->bitmap;
1783
1784 if (!bitmap) /* there was no bitmap */
1785 return;
1786
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001787 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang69b00b52019-12-23 10:49:00 +01001788 if (!mddev->serialize_policy)
1789 mddev_destroy_serial_pool(mddev, NULL, true);
Guoqing Jiang48df4982017-03-14 09:40:20 +08001790
NeilBrownc3d97142009-12-14 12:49:52 +11001791 mutex_lock(&mddev->bitmap_info.mutex);
NeilBrown978a7a42014-12-15 12:56:58 +11001792 spin_lock(&mddev->lock);
NeilBrown3178b0d2005-09-09 16:23:50 -07001793 mddev->bitmap = NULL; /* disconnect from the md device */
NeilBrown978a7a42014-12-15 12:56:58 +11001794 spin_unlock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11001795 mutex_unlock(&mddev->bitmap_info.mutex);
NeilBrownb15c2e52006-01-06 00:20:16 -08001796 if (mddev->thread)
1797 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown3178b0d2005-09-09 16:23:50 -07001798
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001799 md_bitmap_free(bitmap);
NeilBrown3178b0d2005-09-09 16:23:50 -07001800}
NeilBrown32a76272005-06-21 17:17:14 -07001801
1802/*
1803 * initialize the bitmap structure
1804 * if this returns an error, bitmap_destroy must be called to do clean up
Guoqing Jiangf9a67b12016-04-01 17:08:49 +08001805 * once mddev->bitmap is set
NeilBrown32a76272005-06-21 17:17:14 -07001806 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001807struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
NeilBrown32a76272005-06-21 17:17:14 -07001808{
1809 struct bitmap *bitmap;
NeilBrown1f593902009-04-20 11:50:24 +10001810 sector_t blocks = mddev->resync_max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11001811 struct file *file = mddev->bitmap_info.file;
NeilBrown32a76272005-06-21 17:17:14 -07001812 int err;
Tejun Heo324a56e2013-12-11 14:11:53 -05001813 struct kernfs_node *bm = NULL;
NeilBrown32a76272005-06-21 17:17:14 -07001814
Alexey Dobriyan5f6e3c832006-10-11 01:22:26 -07001815 BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
NeilBrown32a76272005-06-21 17:17:14 -07001816
NeilBrownc3d97142009-12-14 12:49:52 +11001817 BUG_ON(file && mddev->bitmap_info.offset);
NeilBrowna654b9d82005-06-21 17:17:27 -07001818
NeilBrown230b55f2017-10-17 14:24:09 +11001819 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1820 pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
1821 mdname(mddev));
1822 return ERR_PTR(-EBUSY);
1823 }
1824
NeilBrown9ffae0c2006-01-06 00:20:32 -08001825 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
NeilBrown32a76272005-06-21 17:17:14 -07001826 if (!bitmap)
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001827 return ERR_PTR(-ENOMEM);
NeilBrown32a76272005-06-21 17:17:14 -07001828
NeilBrown40cffcc2012-05-22 13:55:24 +10001829 spin_lock_init(&bitmap->counts.lock);
NeilBrownce25c312006-06-26 00:27:49 -07001830 atomic_set(&bitmap->pending_writes, 0);
1831 init_waitqueue_head(&bitmap->write_wait);
Neil Brownda6e1a32007-02-08 14:20:37 -08001832 init_waitqueue_head(&bitmap->overflow_wait);
NeilBrowne5551902010-03-31 11:21:44 +11001833 init_waitqueue_head(&bitmap->behind_wait);
NeilBrownce25c312006-06-26 00:27:49 -07001834
NeilBrown32a76272005-06-21 17:17:14 -07001835 bitmap->mddev = mddev;
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001836 bitmap->cluster_slot = slot;
NeilBrown32a76272005-06-21 17:17:14 -07001837
NeilBrown5ff5aff2010-06-01 19:37:32 +10001838 if (mddev->kobj.sd)
Tejun Heo388975c2013-09-11 23:19:13 -04001839 bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
NeilBrownece5cff2009-12-14 12:49:56 +11001840 if (bm) {
Tejun Heo388975c2013-09-11 23:19:13 -04001841 bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
NeilBrownece5cff2009-12-14 12:49:56 +11001842 sysfs_put(bm);
1843 } else
1844 bitmap->sysfs_can_clear = NULL;
1845
NeilBrown1ec885c2012-05-22 13:55:10 +10001846 bitmap->storage.file = file;
NeilBrownce25c312006-06-26 00:27:49 -07001847 if (file) {
1848 get_file(file);
NeilBrownae8fa282009-10-16 15:56:01 +11001849 /* As future accesses to this file will use bmap,
1850 * and bypass the page cache, we must sync the file
1851 * first.
1852 */
Christoph Hellwig8018ab02010-03-22 17:32:25 +01001853 vfs_fsync(file, 1);
NeilBrownce25c312006-06-26 00:27:49 -07001854 }
NeilBrown42a04b52009-12-14 12:49:53 +11001855 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
Jonathan Brassow9c810752011-06-08 17:59:30 -05001856 if (!mddev->bitmap_info.external) {
1857 /*
1858 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1859 * instructing us to create a new on-disk bitmap instance.
1860 */
1861 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001862 err = md_bitmap_new_disk_sb(bitmap);
Jonathan Brassow9c810752011-06-08 17:59:30 -05001863 else
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001864 err = md_bitmap_read_sb(bitmap);
Jonathan Brassow9c810752011-06-08 17:59:30 -05001865 } else {
NeilBrownece5cff2009-12-14 12:49:56 +11001866 err = 0;
1867 if (mddev->bitmap_info.chunksize == 0 ||
1868 mddev->bitmap_info.daemon_sleep == 0)
1869 /* chunksize and time_base need to be
1870 * set first. */
1871 err = -EINVAL;
1872 }
NeilBrown32a76272005-06-21 17:17:14 -07001873 if (err)
NeilBrown3178b0d2005-09-09 16:23:50 -07001874 goto error;
NeilBrown32a76272005-06-21 17:17:14 -07001875
NeilBrown624ce4f2009-12-14 12:49:56 +11001876 bitmap->daemon_lastrun = jiffies;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001877 err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
NeilBrownd60b4792012-05-22 13:55:25 +10001878 if (err)
NeilBrown3178b0d2005-09-09 16:23:50 -07001879 goto error;
NeilBrown32a76272005-06-21 17:17:14 -07001880
NeilBrownec0cc222016-11-02 14:16:49 +11001881 pr_debug("created bitmap (%lu pages) for device %s\n",
1882 bitmap->counts.pages, bmname(bitmap));
NeilBrown69e51b42010-06-01 19:37:35 +10001883
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001884 err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
1885 if (err)
1886 goto error;
NeilBrown69e51b42010-06-01 19:37:35 +10001887
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001888 return bitmap;
NeilBrown69e51b42010-06-01 19:37:35 +10001889 error:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001890 md_bitmap_free(bitmap);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001891 return ERR_PTR(err);
NeilBrown69e51b42010-06-01 19:37:35 +10001892}
1893
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001894int md_bitmap_load(struct mddev *mddev)
NeilBrown69e51b42010-06-01 19:37:35 +10001895{
1896 int err = 0;
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001897 sector_t start = 0;
NeilBrown69e51b42010-06-01 19:37:35 +10001898 sector_t sector = 0;
1899 struct bitmap *bitmap = mddev->bitmap;
Guoqing Jiang617b1942019-06-14 17:10:38 +08001900 struct md_rdev *rdev;
NeilBrown69e51b42010-06-01 19:37:35 +10001901
1902 if (!bitmap)
1903 goto out;
1904
Guoqing Jiang617b1942019-06-14 17:10:38 +08001905 rdev_for_each(rdev, mddev)
Guoqing Jiang404659c2019-12-23 10:48:53 +01001906 mddev_create_serial_pool(mddev, rdev, true);
Guoqing Jiang617b1942019-06-14 17:10:38 +08001907
Guoqing Jiang51e453a2016-05-04 02:17:09 -04001908 if (mddev_is_clustered(mddev))
1909 md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
1910
NeilBrown69e51b42010-06-01 19:37:35 +10001911 /* Clear out old bitmap info first: Either there is none, or we
1912 * are resuming after someone else has possibly changed things,
1913 * so we should forget old cached info.
1914 * All chunks should be clean, but some might need_sync.
1915 */
1916 while (sector < mddev->resync_max_sectors) {
NeilBrown57dab0b2010-10-19 10:03:39 +11001917 sector_t blocks;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001918 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
NeilBrown69e51b42010-06-01 19:37:35 +10001919 sector += blocks;
1920 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001921 md_bitmap_close_sync(bitmap);
NeilBrown69e51b42010-06-01 19:37:35 +10001922
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001923 if (mddev->degraded == 0
1924 || bitmap->events_cleared == mddev->events)
1925 /* no need to keep dirty bits to optimise a
1926 * re-add of a missing device */
1927 start = mddev->recovery_cp;
NeilBrown69e51b42010-06-01 19:37:35 +10001928
NeilBrownafbaa902012-04-12 16:05:06 +10001929 mutex_lock(&mddev->bitmap_info.mutex);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001930 err = md_bitmap_init_from_disk(bitmap, start);
NeilBrownafbaa902012-04-12 16:05:06 +10001931 mutex_unlock(&mddev->bitmap_info.mutex);
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001932
NeilBrown32a76272005-06-21 17:17:14 -07001933 if (err)
NeilBrown69e51b42010-06-01 19:37:35 +10001934 goto out;
NeilBrownb405fe92012-05-22 13:55:15 +10001935 clear_bit(BITMAP_STALE, &bitmap->flags);
NeilBrownef99bf42012-05-22 13:55:08 +10001936
1937 /* Kick recovery in case any bits were set */
1938 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
NeilBrown3178b0d2005-09-09 16:23:50 -07001939
NeilBrown1b04be92009-12-14 12:49:53 +11001940 mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
NeilBrown9cd30fd2009-12-14 12:49:54 +11001941 md_wakeup_thread(mddev->thread);
NeilBrownb15c2e52006-01-06 00:20:16 -08001942
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001943 md_bitmap_update_sb(bitmap);
NeilBrown4ad13662007-07-17 04:06:13 -07001944
NeilBrownb405fe92012-05-22 13:55:15 +10001945 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
NeilBrown69e51b42010-06-01 19:37:35 +10001946 err = -EIO;
1947out:
NeilBrown3178b0d2005-09-09 16:23:50 -07001948 return err;
NeilBrown32a76272005-06-21 17:17:14 -07001949}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001950EXPORT_SYMBOL_GPL(md_bitmap_load);
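/*
 * Illustrative sketch, not a copy of md.c: the calling convention implied by
 * the functions in this file is create -> publish in mddev->bitmap -> load,
 * with md_bitmap_destroy() doing the cleanup once mddev->bitmap is set.  The
 * helper name below is hypothetical:
 */
#if 0
static int example_bitmap_bringup(struct mddev *mddev)
{
        struct bitmap *bitmap = md_bitmap_create(mddev, -1);

        if (IS_ERR(bitmap))
                return PTR_ERR(bitmap);

        mddev->bitmap = bitmap;         /* md_bitmap_load() reads it from here */
        return md_bitmap_load(mddev);   /* caller destroys the bitmap on error */
}
#endif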
NeilBrown32a76272005-06-21 17:17:14 -07001951
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001952struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
1953{
1954 int rv = 0;
1955 struct bitmap *bitmap;
1956
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001957 bitmap = md_bitmap_create(mddev, slot);
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001958 if (IS_ERR(bitmap)) {
1959 rv = PTR_ERR(bitmap);
1960 return ERR_PTR(rv);
1961 }
1962
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001963 rv = md_bitmap_init_from_disk(bitmap, 0);
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001964 if (rv) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001965 md_bitmap_free(bitmap);
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001966 return ERR_PTR(rv);
1967 }
1968
1969 return bitmap;
1970}
1971EXPORT_SYMBOL(get_bitmap_from_slot);
1972
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001973/* Loads the bitmap associated with slot and copies the resync information
1974 * to our bitmap
1975 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001976int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05001977 sector_t *low, sector_t *high, bool clear_bits)
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001978{
1979 int rv = 0, i, j;
1980 sector_t block, lo = 0, hi = 0;
1981 struct bitmap_counts *counts;
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001982 struct bitmap *bitmap;
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001983
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001984 bitmap = get_bitmap_from_slot(mddev, slot);
1985 if (IS_ERR(bitmap)) {
1986 pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
1987 return -1;
1988 }
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001989
1990 counts = &bitmap->counts;
1991 for (j = 0; j < counts->chunks; j++) {
1992 block = (sector_t)j << counts->chunkshift;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001993 if (md_bitmap_file_test_bit(bitmap, block)) {
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001994 if (!lo)
1995 lo = block;
1996 hi = block;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001997 md_bitmap_file_clear_bit(bitmap, block);
1998 md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
1999 md_bitmap_file_set_bit(mddev->bitmap, block);
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002000 }
2001 }
2002
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05002003 if (clear_bits) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002004 md_bitmap_update_sb(bitmap);
Guoqing Jiangc84400c2016-05-02 11:50:15 -04002005 /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2006 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05002007 for (i = 0; i < bitmap->storage.file_pages; i++)
Guoqing Jiangc84400c2016-05-02 11:50:15 -04002008 if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2009 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002010 md_bitmap_unplug(bitmap);
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05002011 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002012 md_bitmap_unplug(mddev->bitmap);
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002013 *low = lo;
2014 *high = hi;
Guoqing Jiangb98938d2017-03-01 16:42:39 +08002015
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002016 return rv;
2017}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002018EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
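/*
 * Illustrative sketch, not part of the driver: the copy above scans every
 * chunk of the other slot's bitmap, remembers the lowest and highest dirty
 * chunk (converted to a sector offset) so the caller knows which range to
 * resync, and transplants each set bit into the local bitmap.  The userspace
 * model below shows just the lo/hi bookkeeping over a fake bit array (it
 * keeps the same "if (!lo)" convention as the code above); all values are
 * made up:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int bits[] = { 0, 0, 1, 0, 1, 1, 0, 0 };        /* assumed dirty chunks */
        uint64_t chunkshift = 7;                        /* 64KiB chunks         */
        uint64_t lo = 0, hi = 0;

        for (uint64_t j = 0; j < sizeof(bits) / sizeof(bits[0]); j++) {
                uint64_t block = j << chunkshift;

                if (bits[j]) {
                        if (!lo)
                                lo = block;
                        hi = block;
                }
        }
        printf("resync range: [%llu, %llu]\n",
               (unsigned long long)lo, (unsigned long long)hi);
        return 0;
}
#endif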
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002019
2020
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002021void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
NeilBrown57148962012-03-19 12:46:40 +11002022{
2023 unsigned long chunk_kb;
NeilBrown40cffcc2012-05-22 13:55:24 +10002024 struct bitmap_counts *counts;
NeilBrown57148962012-03-19 12:46:40 +11002025
2026 if (!bitmap)
2027 return;
2028
NeilBrown40cffcc2012-05-22 13:55:24 +10002029 counts = &bitmap->counts;
2030
NeilBrown57148962012-03-19 12:46:40 +11002031 chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
2032 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
2033 "%lu%s chunk",
NeilBrown40cffcc2012-05-22 13:55:24 +10002034 counts->pages - counts->missing_pages,
2035 counts->pages,
2036 (counts->pages - counts->missing_pages)
NeilBrown57148962012-03-19 12:46:40 +11002037 << (PAGE_SHIFT - 10),
2038 chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
2039 chunk_kb ? "KB" : "B");
NeilBrown1ec885c2012-05-22 13:55:10 +10002040 if (bitmap->storage.file) {
NeilBrown57148962012-03-19 12:46:40 +11002041 seq_printf(seq, ", file: ");
Miklos Szeredi2726d562015-06-19 10:30:28 +02002042 seq_file_path(seq, bitmap->storage.file, " \t\n");
NeilBrown57148962012-03-19 12:46:40 +11002043 }
2044
2045 seq_printf(seq, "\n");
NeilBrown57148962012-03-19 12:46:40 +11002046}
2047
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002048int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
NeilBrownd60b4792012-05-22 13:55:25 +10002049 int chunksize, int init)
2050{
2051 /* If chunk_size is 0, choose an appropriate chunk size.
2052 * Then possibly allocate new storage space.
2053 * Then quiesce, copy bits, replace bitmap, and re-start
2054 *
2055 * This function is called both to set up the initial bitmap
2056 * and to resize the bitmap while the array is active.
2057 * If this happens as a result of the array being resized,
2058 * chunksize will be zero, and we need to choose a suitable
2059 * chunksize, otherwise we use what we are given.
2060 */
2061 struct bitmap_storage store;
2062 struct bitmap_counts old_counts;
2063 unsigned long chunks;
2064 sector_t block;
2065 sector_t old_blocks, new_blocks;
2066 int chunkshift;
2067 int ret = 0;
2068 long pages;
2069 struct bitmap_page *new_bp;
2070
NeilBrowne8a27f82017-08-31 10:23:25 +10002071 if (bitmap->storage.file && !init) {
2072 pr_info("md: cannot resize file-based bitmap\n");
2073 return -EINVAL;
2074 }
2075
NeilBrownd60b4792012-05-22 13:55:25 +10002076 if (chunksize == 0) {
2077 /* If there is enough space, leave the chunk size unchanged,
2078 * else increase by factor of two until there is enough space.
2079 */
2080 long bytes;
2081 long space = bitmap->mddev->bitmap_info.space;
2082
2083 if (space == 0) {
2084 /* We don't know how much space there is, so limit
2085 * to current size - in sectors.
2086 */
2087 bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2088 if (!bitmap->mddev->bitmap_info.external)
2089 bytes += sizeof(bitmap_super_t);
2090 space = DIV_ROUND_UP(bytes, 512);
2091 bitmap->mddev->bitmap_info.space = space;
2092 }
2093 chunkshift = bitmap->counts.chunkshift;
2094 chunkshift--;
2095 do {
2096 /* 'chunkshift' is shift from block size to chunk size */
2097 chunkshift++;
2098 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2099 bytes = DIV_ROUND_UP(chunks, 8);
2100 if (!bitmap->mddev->bitmap_info.external)
2101 bytes += sizeof(bitmap_super_t);
2102 } while (bytes > (space << 9));
2103 } else
2104 chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
2105
2106 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2107 memset(&store, 0, sizeof(store));
2108 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002109 ret = md_bitmap_storage_alloc(&store, chunks,
2110 !bitmap->mddev->bitmap_info.external,
2111 mddev_is_clustered(bitmap->mddev)
2112 ? bitmap->cluster_slot : 0);
Guoqing Jiangcbb38732016-10-31 10:19:00 +08002113 if (ret) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002114 md_bitmap_file_unmap(&store);
NeilBrownd60b4792012-05-22 13:55:25 +10002115 goto err;
Guoqing Jiangcbb38732016-10-31 10:19:00 +08002116 }
NeilBrownd60b4792012-05-22 13:55:25 +10002117
2118 pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2119
Kees Cook6396bb22018-06-12 14:03:40 -07002120 new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
NeilBrownd60b4792012-05-22 13:55:25 +10002121 ret = -ENOMEM;
2122 if (!new_bp) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002123 md_bitmap_file_unmap(&store);
NeilBrownd60b4792012-05-22 13:55:25 +10002124 goto err;
2125 }
2126
2127 if (!init)
2128 bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2129
2130 store.file = bitmap->storage.file;
2131 bitmap->storage.file = NULL;
2132
2133 if (store.sb_page && bitmap->storage.sb_page)
2134 memcpy(page_address(store.sb_page),
2135 page_address(bitmap->storage.sb_page),
Shaohua Li938b5332017-10-16 19:03:44 -07002136 sizeof(bitmap_super_t));
Guoqing Jiangfadcbd22019-09-26 13:53:50 +02002137 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002138 md_bitmap_file_unmap(&bitmap->storage);
NeilBrownd60b4792012-05-22 13:55:25 +10002139 bitmap->storage = store;
2140
2141 old_counts = bitmap->counts;
2142 bitmap->counts.bp = new_bp;
2143 bitmap->counts.pages = pages;
2144 bitmap->counts.missing_pages = pages;
2145 bitmap->counts.chunkshift = chunkshift;
2146 bitmap->counts.chunks = chunks;
2147 bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
2148 BITMAP_BLOCK_SHIFT);
2149
2150 blocks = min(old_counts.chunks << old_counts.chunkshift,
2151 chunks << chunkshift);
2152
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002153 /* For cluster raid, need to pre-allocate bitmap */
2154 if (mddev_is_clustered(bitmap->mddev)) {
2155 unsigned long page;
2156 for (page = 0; page < pages; page++) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002157 ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002158 if (ret) {
2159 unsigned long k;
2160
2161 /* deallocate the page memory */
2162 for (k = 0; k < page; k++) {
kbuild test robotbc47e842016-05-02 11:50:16 -04002163 kfree(new_bp[k].map);
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002164 }
Zdenek Kabelac0868b992017-11-08 13:44:56 +01002165 kfree(new_bp);
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002166
2167 /* restore some fields from old_counts */
2168 bitmap->counts.bp = old_counts.bp;
2169 bitmap->counts.pages = old_counts.pages;
2170 bitmap->counts.missing_pages = old_counts.pages;
2171 bitmap->counts.chunkshift = old_counts.chunkshift;
2172 bitmap->counts.chunks = old_counts.chunks;
2173 bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
2174 BITMAP_BLOCK_SHIFT);
2175 blocks = old_counts.chunks << old_counts.chunkshift;
NeilBrownec0cc222016-11-02 14:16:49 +11002176 pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
Guoqing Jiangc9d65032016-05-02 11:50:11 -04002177 break;
2178 } else
2179 bitmap->counts.bp[page].count += 1;
2180 }
2181 }
2182
NeilBrownd60b4792012-05-22 13:55:25 +10002183 for (block = 0; block < blocks; ) {
2184 bitmap_counter_t *bmc_old, *bmc_new;
2185 int set;
2186
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002187 bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
NeilBrownd60b4792012-05-22 13:55:25 +10002188 set = bmc_old && NEEDED(*bmc_old);
2189
2190 if (set) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002191 bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
NeilBrownd60b4792012-05-22 13:55:25 +10002192 if (*bmc_new == 0) {
2193 /* need to set on-disk bits too. */
2194 sector_t end = block + new_blocks;
2195 sector_t start = block >> chunkshift;
2196 start <<= chunkshift;
2197 while (start < end) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002198 md_bitmap_file_set_bit(bitmap, block);
NeilBrownd60b4792012-05-22 13:55:25 +10002199 start += 1 << chunkshift;
2200 }
2201 *bmc_new = 2;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002202 md_bitmap_count_page(&bitmap->counts, block, 1);
2203 md_bitmap_set_pending(&bitmap->counts, block);
NeilBrownd60b4792012-05-22 13:55:25 +10002204 }
2205 *bmc_new |= NEEDED_MASK;
2206 if (new_blocks < old_blocks)
2207 old_blocks = new_blocks;
2208 }
2209 block += old_blocks;
2210 }
2211
Zdenek Kabelac0868b992017-11-08 13:44:56 +01002212 if (bitmap->counts.bp != old_counts.bp) {
2213 unsigned long k;
2214 for (k = 0; k < old_counts.pages; k++)
2215 if (!old_counts.bp[k].hijacked)
2216 kfree(old_counts.bp[k].map);
2217 kfree(old_counts.bp);
2218 }
2219
NeilBrownd60b4792012-05-22 13:55:25 +10002220 if (!init) {
2221 int i;
2222 while (block < (chunks << chunkshift)) {
2223 bitmap_counter_t *bmc;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002224 bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
NeilBrownd60b4792012-05-22 13:55:25 +10002225 if (bmc) {
2226 /* new space. It needs to be resynced, so
2227 * we set NEEDED_MASK.
2228 */
2229 if (*bmc == 0) {
2230 *bmc = NEEDED_MASK | 2;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002231 md_bitmap_count_page(&bitmap->counts, block, 1);
2232 md_bitmap_set_pending(&bitmap->counts, block);
NeilBrownd60b4792012-05-22 13:55:25 +10002233 }
2234 }
2235 block += new_blocks;
2236 }
2237 for (i = 0; i < bitmap->storage.file_pages; i++)
2238 set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
2239 }
2240 spin_unlock_irq(&bitmap->counts.lock);
2241
2242 if (!init) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002243 md_bitmap_unplug(bitmap);
NeilBrownd60b4792012-05-22 13:55:25 +10002244 bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
2245 }
2246 ret = 0;
2247err:
2248 return ret;
2249}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002250EXPORT_SYMBOL_GPL(md_bitmap_resize);
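/*
 * Illustrative caller sketch (not part of this file): the personality
 * resize paths pass the new per-device size in sectors and let the chunk
 * size be (re)chosen, roughly
 *
 *	if (mddev->bitmap) {
 *		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
 *		if (ret)
 *			return ret;
 *	}
 *
 * where chunksize == 0 asks md_bitmap_resize() to pick a chunk size that
 * fits the reserved space, and init == 0 makes it quiesce the array around
 * the switch (see the !init paths above).
 */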
NeilBrownd60b4792012-05-22 13:55:25 +10002251
NeilBrown43a70502009-12-14 12:49:55 +11002252static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002253location_show(struct mddev *mddev, char *page)
NeilBrown43a70502009-12-14 12:49:55 +11002254{
2255 ssize_t len;
NeilBrownac2f40b2010-06-01 19:37:31 +10002256 if (mddev->bitmap_info.file)
NeilBrown43a70502009-12-14 12:49:55 +11002257 len = sprintf(page, "file");
NeilBrownac2f40b2010-06-01 19:37:31 +10002258 else if (mddev->bitmap_info.offset)
NeilBrown43a70502009-12-14 12:49:55 +11002259 len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
NeilBrownac2f40b2010-06-01 19:37:31 +10002260 else
NeilBrown43a70502009-12-14 12:49:55 +11002261 len = sprintf(page, "none");
2262 len += sprintf(page+len, "\n");
2263 return len;
2264}
2265
2266static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002267location_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown43a70502009-12-14 12:49:55 +11002268{
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002269 int rv;
NeilBrown43a70502009-12-14 12:49:55 +11002270
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002271 rv = mddev_lock(mddev);
2272 if (rv)
2273 return rv;
NeilBrown43a70502009-12-14 12:49:55 +11002274 if (mddev->pers) {
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002275 if (!mddev->pers->quiesce) {
2276 rv = -EBUSY;
2277 goto out;
2278 }
2279 if (mddev->recovery || mddev->sync_thread) {
2280 rv = -EBUSY;
2281 goto out;
2282 }
NeilBrown43a70502009-12-14 12:49:55 +11002283 }
2284
2285 if (mddev->bitmap || mddev->bitmap_info.file ||
2286 mddev->bitmap_info.offset) {
2287 /* bitmap already configured. Only option is to clear it */
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002288 if (strncmp(buf, "none", 4) != 0) {
2289 rv = -EBUSY;
2290 goto out;
2291 }
NeilBrown43a70502009-12-14 12:49:55 +11002292 if (mddev->pers) {
Jack Wangf8f83d82018-10-08 17:24:03 +02002293 mddev_suspend(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002294 md_bitmap_destroy(mddev);
Jack Wangf8f83d82018-10-08 17:24:03 +02002295 mddev_resume(mddev);
NeilBrown43a70502009-12-14 12:49:55 +11002296 }
2297 mddev->bitmap_info.offset = 0;
2298 if (mddev->bitmap_info.file) {
2299 struct file *f = mddev->bitmap_info.file;
2300 mddev->bitmap_info.file = NULL;
NeilBrown43a70502009-12-14 12:49:55 +11002301 fput(f);
2302 }
2303 } else {
2304 /* No bitmap, OK to set a location */
2305 long long offset;
2306 if (strncmp(buf, "none", 4) == 0)
2307 /* nothing to be done */;
2308 else if (strncmp(buf, "file:", 5) == 0) {
2309 /* Not supported yet */
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002310 rv = -EINVAL;
2311 goto out;
NeilBrown43a70502009-12-14 12:49:55 +11002312 } else {
NeilBrown43a70502009-12-14 12:49:55 +11002313 if (buf[0] == '+')
Jingoo Hanb29bebd2013-06-01 16:15:16 +09002314 rv = kstrtoll(buf+1, 10, &offset);
NeilBrown43a70502009-12-14 12:49:55 +11002315 else
Jingoo Hanb29bebd2013-06-01 16:15:16 +09002316 rv = kstrtoll(buf, 10, &offset);
NeilBrown43a70502009-12-14 12:49:55 +11002317 if (rv)
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002318 goto out;
2319 if (offset == 0) {
2320 rv = -EINVAL;
2321 goto out;
2322 }
NeilBrownece5cff2009-12-14 12:49:56 +11002323 if (mddev->bitmap_info.external == 0 &&
2324 mddev->major_version == 0 &&
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002325 offset != mddev->bitmap_info.default_offset) {
2326 rv = -EINVAL;
2327 goto out;
2328 }
NeilBrown43a70502009-12-14 12:49:55 +11002329 mddev->bitmap_info.offset = offset;
2330 if (mddev->pers) {
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05002331 struct bitmap *bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002332 bitmap = md_bitmap_create(mddev, -1);
Jack Wangf8f83d82018-10-08 17:24:03 +02002333 mddev_suspend(mddev);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05002334 if (IS_ERR(bitmap))
2335 rv = PTR_ERR(bitmap);
2336 else {
2337 mddev->bitmap = bitmap;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002338 rv = md_bitmap_load(mddev);
Guoqing Jiangf9a67b12016-04-01 17:08:49 +08002339 if (rv)
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05002340 mddev->bitmap_info.offset = 0;
NeilBrown43a70502009-12-14 12:49:55 +11002341 }
Guoqing Jiangf9a67b12016-04-01 17:08:49 +08002342 if (rv) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002343 md_bitmap_destroy(mddev);
Jack Wangf8f83d82018-10-08 17:24:03 +02002344 mddev_resume(mddev);
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002345 goto out;
Guoqing Jiangf9a67b12016-04-01 17:08:49 +08002346 }
Jack Wangf8f83d82018-10-08 17:24:03 +02002347 mddev_resume(mddev);
NeilBrown43a70502009-12-14 12:49:55 +11002348 }
2349 }
2350 }
2351 if (!mddev->external) {
2352 /* Ensure new bitmap info is stored in
2353 * metadata promptly.
2354 */
Shaohua Li29530792016-12-08 15:48:19 -08002355 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
NeilBrown43a70502009-12-14 12:49:55 +11002356 md_wakeup_thread(mddev->thread);
2357 }
Shaohua Lid9dd26b2016-07-30 10:05:31 -07002358 rv = 0;
2359out:
2360 mddev_unlock(mddev);
2361 if (rv)
2362 return rv;
NeilBrown43a70502009-12-14 12:49:55 +11002363 return len;
2364}
2365
2366static struct md_sysfs_entry bitmap_location =
2367__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
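/*
 * Illustrative use of 'bitmap/location' (paths and values are examples
 * only): writing "none" tears down any existing bitmap, while a signed
 * decimal such as
 *
 *	echo +<sectors> > /sys/block/md0/md/bitmap/location
 *
 * places an internal bitmap that many 512-byte sectors from the
 * superblock.  "file:..." is rejected above, and with v0.90 internal
 * metadata only the default offset is accepted.
 */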
2368
NeilBrown6409bb02012-05-22 13:55:07 +10002369/* 'bitmap/space' is the space available at 'location' for the
2370 * bitmap. This allows the kernel to know when it is safe to
2371 * resize the bitmap to match a resized array.
2372 */
2373static ssize_t
2374space_show(struct mddev *mddev, char *page)
2375{
2376 return sprintf(page, "%lu\n", mddev->bitmap_info.space);
2377}
2378
2379static ssize_t
2380space_store(struct mddev *mddev, const char *buf, size_t len)
2381{
2382 unsigned long sectors;
2383 int rv;
2384
2385 rv = kstrtoul(buf, 10, &sectors);
2386 if (rv)
2387 return rv;
2388
2389 if (sectors == 0)
2390 return -EINVAL;
2391
2392 if (mddev->bitmap &&
NeilBrown9b1215c2012-05-22 13:55:11 +10002393 sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
NeilBrown6409bb02012-05-22 13:55:07 +10002394 return -EFBIG; /* Bitmap is too big for this small space */
2395
2396 /* could make sure it isn't too big, but that isn't really
2397 * needed - user-space should be careful.
2398 */
2399 mddev->bitmap_info.space = sectors;
2400 return len;
2401}
2402
2403static struct md_sysfs_entry bitmap_space =
2404__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
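/*
 * Example (illustrative only): 'space' is counted in 512-byte sectors, so
 *
 *	echo 8192 > /sys/block/md0/md/bitmap/space
 *
 * reserves 4MiB for the bitmap.  Values smaller than what the current
 * bitmap already occupies are rejected with -EFBIG above.
 */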
2405
NeilBrown43a70502009-12-14 12:49:55 +11002406static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002407timeout_show(struct mddev *mddev, char *page)
NeilBrown43a70502009-12-14 12:49:55 +11002408{
2409 ssize_t len;
2410 unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
2411 unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
NeilBrownac2f40b2010-06-01 19:37:31 +10002412
NeilBrown43a70502009-12-14 12:49:55 +11002413 len = sprintf(page, "%lu", secs);
2414 if (jifs)
2415 len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
2416 len += sprintf(page+len, "\n");
2417 return len;
2418}
2419
2420static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002421timeout_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown43a70502009-12-14 12:49:55 +11002422{
2423 /* timeout can be set at any time */
2424 unsigned long timeout;
2425 int rv = strict_strtoul_scaled(buf, &timeout, 4);
2426 if (rv)
2427 return rv;
2428
2429	/* just to make sure the multiplication by HZ below cannot overflow */
2430 if (timeout >= LONG_MAX / HZ)
2431 return -EINVAL;
2432
2433 timeout = timeout * HZ / 10000;
2434
2435 if (timeout >= MAX_SCHEDULE_TIMEOUT)
2436 timeout = MAX_SCHEDULE_TIMEOUT-1;
2437 if (timeout < 1)
2438 timeout = 1;
2439 mddev->bitmap_info.daemon_sleep = timeout;
2440 if (mddev->thread) {
2441 /* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
2442 * the bitmap is all clean and we don't need to
2443 * adjust the timeout right now
2444 */
2445 if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
2446 mddev->thread->timeout = timeout;
2447 md_wakeup_thread(mddev->thread);
2448 }
2449 }
2450 return len;
2451}
2452
2453static struct md_sysfs_entry bitmap_timeout =
2454__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
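/*
 * Example (illustrative only): the value is in seconds with up to four
 * decimal places; strict_strtoul_scaled(..., 4) yields units of 100us and
 * the "* HZ / 10000" above converts that to jiffies, so
 *
 *	echo 5 > /sys/block/md0/md/bitmap/time_base
 *
 * sets daemon_sleep to 5*HZ while "0.5" would give HZ/2.
 */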
2455
2456static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002457backlog_show(struct mddev *mddev, char *page)
NeilBrown43a70502009-12-14 12:49:55 +11002458{
2459 return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
2460}
2461
2462static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002463backlog_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown43a70502009-12-14 12:49:55 +11002464{
2465 unsigned long backlog;
Guoqing Jiang10c92fc2019-06-14 17:10:37 +08002466 unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09002467 int rv = kstrtoul(buf, 10, &backlog);
NeilBrown43a70502009-12-14 12:49:55 +11002468 if (rv)
2469 return rv;
2470 if (backlog > COUNTER_MAX)
2471 return -EINVAL;
2472 mddev->bitmap_info.max_write_behind = backlog;
Guoqing Jiang404659c2019-12-23 10:48:53 +01002473 if (!backlog && mddev->serial_info_pool) {
2474 /* serial_info_pool is not needed if backlog is zero */
Guoqing Jiang69b00b52019-12-23 10:49:00 +01002475 if (!mddev->serialize_policy)
2476 mddev_destroy_serial_pool(mddev, NULL, false);
Guoqing Jiang404659c2019-12-23 10:48:53 +01002477 } else if (backlog && !mddev->serial_info_pool) {
2478 /* serial_info_pool is needed since backlog is not zero */
Guoqing Jiang10c92fc2019-06-14 17:10:37 +08002479 struct md_rdev *rdev;
2480
2481 rdev_for_each(rdev, mddev)
Guoqing Jiang404659c2019-12-23 10:48:53 +01002482 mddev_create_serial_pool(mddev, rdev, false);
Guoqing Jiang10c92fc2019-06-14 17:10:37 +08002483 }
2484 if (old_mwb != backlog)
2485 md_bitmap_update_sb(mddev->bitmap);
NeilBrown43a70502009-12-14 12:49:55 +11002486 return len;
2487}
2488
2489static struct md_sysfs_entry bitmap_backlog =
2490__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
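/*
 * Example (illustrative only): 'backlog' is max_write_behind, the cap on
 * write-behind requests in flight to write-mostly devices;
 *
 *	echo 256 > /sys/block/md0/md/bitmap/backlog
 *
 * allows up to 256 outstanding write-behind writes, while 0 disables
 * write-behind and (unless serialize_policy is set) frees the
 * serial_info_pool as handled above.
 */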
2491
2492static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002493chunksize_show(struct mddev *mddev, char *page)
NeilBrown43a70502009-12-14 12:49:55 +11002494{
2495 return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
2496}
2497
2498static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002499chunksize_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrown43a70502009-12-14 12:49:55 +11002500{
2501 /* Can only be changed when no bitmap is active */
2502 int rv;
2503 unsigned long csize;
2504 if (mddev->bitmap)
2505 return -EBUSY;
Jingoo Hanb29bebd2013-06-01 16:15:16 +09002506 rv = kstrtoul(buf, 10, &csize);
NeilBrown43a70502009-12-14 12:49:55 +11002507 if (rv)
2508 return rv;
2509 if (csize < 512 ||
2510 !is_power_of_2(csize))
2511 return -EINVAL;
2512 mddev->bitmap_info.chunksize = csize;
2513 return len;
2514}
2515
2516static struct md_sysfs_entry bitmap_chunksize =
2517__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
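/*
 * Example (illustrative only): the chunk size is in bytes, must be a power
 * of two no smaller than 512, and can only be set while no bitmap exists;
 *
 *	echo 67108864 > /sys/block/md0/md/bitmap/chunksize
 *
 * requests 64MiB chunks for a bitmap created afterwards.
 */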
2518
NeilBrownfd01b882011-10-11 16:47:53 +11002519static ssize_t metadata_show(struct mddev *mddev, char *page)
NeilBrownece5cff2009-12-14 12:49:56 +11002520{
Goldwyn Rodriguesc4ce8672014-03-29 10:20:02 -05002521 if (mddev_is_clustered(mddev))
2522 return sprintf(page, "clustered\n");
NeilBrownece5cff2009-12-14 12:49:56 +11002523 return sprintf(page, "%s\n", (mddev->bitmap_info.external
2524 ? "external" : "internal"));
2525}
2526
NeilBrownfd01b882011-10-11 16:47:53 +11002527static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownece5cff2009-12-14 12:49:56 +11002528{
2529 if (mddev->bitmap ||
2530 mddev->bitmap_info.file ||
2531 mddev->bitmap_info.offset)
2532 return -EBUSY;
2533 if (strncmp(buf, "external", 8) == 0)
2534 mddev->bitmap_info.external = 1;
Goldwyn Rodriguesc4ce8672014-03-29 10:20:02 -05002535 else if ((strncmp(buf, "internal", 8) == 0) ||
2536 (strncmp(buf, "clustered", 9) == 0))
NeilBrownece5cff2009-12-14 12:49:56 +11002537 mddev->bitmap_info.external = 0;
2538 else
2539 return -EINVAL;
2540 return len;
2541}
2542
2543static struct md_sysfs_entry bitmap_metadata =
2544__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
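/*
 * 'metadata' accepts "external", "internal" or "clustered" (the latter two
 * both clear bitmap_info.external) and may only be changed before any
 * bitmap, bitmap file or offset is configured; reads report "clustered"
 * on cluster-md arrays.
 */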
2545
NeilBrownfd01b882011-10-11 16:47:53 +11002546static ssize_t can_clear_show(struct mddev *mddev, char *page)
NeilBrownece5cff2009-12-14 12:49:56 +11002547{
2548 int len;
NeilBrownb7b17c92014-12-15 12:56:59 +11002549 spin_lock(&mddev->lock);
NeilBrownece5cff2009-12-14 12:49:56 +11002550 if (mddev->bitmap)
2551 len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
2552 "false" : "true"));
2553 else
2554 len = sprintf(page, "\n");
NeilBrownb7b17c92014-12-15 12:56:59 +11002555 spin_unlock(&mddev->lock);
NeilBrownece5cff2009-12-14 12:49:56 +11002556 return len;
2557}
2558
NeilBrownfd01b882011-10-11 16:47:53 +11002559static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
NeilBrownece5cff2009-12-14 12:49:56 +11002560{
2561 if (mddev->bitmap == NULL)
2562 return -ENOENT;
2563 if (strncmp(buf, "false", 5) == 0)
2564 mddev->bitmap->need_sync = 1;
2565 else if (strncmp(buf, "true", 4) == 0) {
2566 if (mddev->degraded)
2567 return -EBUSY;
2568 mddev->bitmap->need_sync = 0;
2569 } else
2570 return -EINVAL;
2571 return len;
2572}
2573
2574static struct md_sysfs_entry bitmap_can_clear =
2575__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
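/*
 * 'can_clear' mirrors bitmap->need_sync inverted: it reads "false" while a
 * sync is still needed.  Writing "false" forces need_sync on; writing
 * "true" clears it, but is refused with -EBUSY while the array is degraded.
 */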
2576
Paul Clements696fcd52010-03-08 16:02:37 +11002577static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002578behind_writes_used_show(struct mddev *mddev, char *page)
Paul Clements696fcd52010-03-08 16:02:37 +11002579{
NeilBrownb7b17c92014-12-15 12:56:59 +11002580 ssize_t ret;
2581 spin_lock(&mddev->lock);
Paul Clements696fcd52010-03-08 16:02:37 +11002582 if (mddev->bitmap == NULL)
NeilBrownb7b17c92014-12-15 12:56:59 +11002583 ret = sprintf(page, "0\n");
2584 else
2585 ret = sprintf(page, "%lu\n",
2586 mddev->bitmap->behind_writes_used);
2587 spin_unlock(&mddev->lock);
2588 return ret;
Paul Clements696fcd52010-03-08 16:02:37 +11002589}
2590
2591static ssize_t
NeilBrownfd01b882011-10-11 16:47:53 +11002592behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
Paul Clements696fcd52010-03-08 16:02:37 +11002593{
2594 if (mddev->bitmap)
2595 mddev->bitmap->behind_writes_used = 0;
2596 return len;
2597}
2598
2599static struct md_sysfs_entry max_backlog_used =
2600__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
2601 behind_writes_used_show, behind_writes_used_reset);
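/*
 * 'max_backlog_used' reports the largest number of simultaneous
 * write-behind requests seen so far; writing anything to it resets the
 * counter to zero.
 */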
2602
NeilBrown43a70502009-12-14 12:49:55 +11002603static struct attribute *md_bitmap_attrs[] = {
2604 &bitmap_location.attr,
NeilBrown6409bb02012-05-22 13:55:07 +10002605 &bitmap_space.attr,
NeilBrown43a70502009-12-14 12:49:55 +11002606 &bitmap_timeout.attr,
2607 &bitmap_backlog.attr,
2608 &bitmap_chunksize.attr,
NeilBrownece5cff2009-12-14 12:49:56 +11002609 &bitmap_metadata.attr,
2610 &bitmap_can_clear.attr,
Paul Clements696fcd52010-03-08 16:02:37 +11002611 &max_backlog_used.attr,
NeilBrown43a70502009-12-14 12:49:55 +11002612 NULL
2613};
2614struct attribute_group md_bitmap_group = {
2615 .name = "bitmap",
2616 .attrs = md_bitmap_attrs,
2617};
2618