// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"

static inline char *bmname(struct bitmap *bitmap)
{
	return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}

/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
			       unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
	unsigned char *mappage;

	if (page >= bitmap->pages) {
		/* This can happen if bitmap_start_sync goes beyond
		 * End-of-device while looking for a whole page.
		 * It is harmless.
		 */
		return -EINVAL;
	}

	if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
		return 0;

	if (bitmap->bp[page].map) /* page is already allocated, just return */
		return 0;

	if (!create)
		return -ENOENT;

	/* this page has not been allocated yet */

	spin_unlock_irq(&bitmap->lock);
	/* It is possible that this is being called inside a
	 * prepare_to_wait/finish_wait loop from raid5c:make_request().
	 * In general it is not permitted to sleep in that context as it
	 * can cause the loop to spin freely.
	 * That doesn't apply here as we can only reach this point
	 * once with any loop.
	 * When this function completes, either bp[page].map or
	 * bp[page].hijacked.  In either case, this function will
	 * abort before getting to this point again.  So there is
	 * no risk of a free-spin, and so it is safe to assert
	 * that sleeping here is allowed.
	 */
	sched_annotate_sleep();
	mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
	spin_lock_irq(&bitmap->lock);

	if (mappage == NULL) {
		pr_debug("md/bitmap: map page allocation failed, hijacking\n");
		/* We don't support hijack for cluster raid */
		if (no_hijack)
			return -ENOMEM;
		/* failed - set the hijacked flag so that we can use the
		 * pointer as a counter */
		if (!bitmap->bp[page].map)
			bitmap->bp[page].hijacked = 1;
	} else if (bitmap->bp[page].map ||
		   bitmap->bp[page].hijacked) {
		/* somebody beat us to getting the page */
		kfree(mappage);
	} else {

		/* no page was in place and we have one, so install it */

		bitmap->bp[page].map = mappage;
		bitmap->missing_pages--;
	}
	return 0;
}

/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
	char *ptr;

	if (bitmap->bp[page].count) /* page is still busy */
		return;

	/* page is no longer in use, it can be released */

	if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
		bitmap->bp[page].hijacked = 0;
		bitmap->bp[page].map = NULL;
	} else {
		/* normal case, free the page */
		ptr = bitmap->bp[page].map;
		bitmap->bp[page].map = NULL;
		bitmap->missing_pages++;
		kfree(ptr);
	}
}

/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
			struct page *page,
			unsigned long index, int size)
{
	/* choose a good rdev and read the page from there */

	struct md_rdev *rdev;
	sector_t target;

	rdev_for_each(rdev, mddev) {
		if (! test_bit(In_sync, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags)
		    || test_bit(Bitmap_sync, &rdev->flags))
			continue;

		target = offset + index * (PAGE_SIZE/512);

		if (sync_page_io(rdev, target,
				 roundup(size, bdev_logical_block_size(rdev->bdev)),
				 page, REQ_OP_READ, 0, true)) {
			page->index = index;
			return 0;
		}
	}
	return -EIO;
}
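
/*
 * Worked example for the sector arithmetic above (a sketch, assuming
 * 4 KiB pages so PAGE_SIZE/512 == 8): bitmap page 'index' N begins
 * 8*N sectors past 'offset', so with offset == 8 sectors, page 2 is
 * read starting at target = 8 + 2 * 8 = 24.
 */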
175
NeilBrownfd01b882011-10-11 16:47:53 +1100176static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000177{
178 /* Iterate the disks of an mddev, using rcu to protect access to the
179 * linked list, and raising the refcount of devices we return to ensure
180 * they don't disappear while in use.
181 * As devices are only added or removed when raid_disk is < 0 and
182 * nr_pending is 0 and In_sync is clear, the entries we return will
183 * still be in the same position on the list when we re-enter
Michael Wangfd177482012-10-11 13:43:21 +1100184 * list_for_each_entry_continue_rcu.
NeilBrown8532e342015-05-20 15:05:09 +1000185 *
186 * Note that if entered with 'rdev == NULL' to start at the
187 * beginning, we temporarily assign 'rdev' to an address which
188 * isn't really an rdev, but which can be used by
189 * list_for_each_entry_continue_rcu() to find the first entry.
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000190 */
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000191 rcu_read_lock();
192 if (rdev == NULL)
193 /* start at the beginning */
NeilBrown8532e342015-05-20 15:05:09 +1000194 rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000195 else {
196 /* release the previous rdev and start from there. */
197 rdev_dec_pending(rdev, mddev);
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000198 }
Michael Wangfd177482012-10-11 13:43:21 +1100199 list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000200 if (rdev->raid_disk >= 0 &&
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000201 !test_bit(Faulty, &rdev->flags)) {
202 /* this is a usable devices */
203 atomic_inc(&rdev->nr_pending);
204 rcu_read_unlock();
205 return rdev;
206 }
207 }
208 rcu_read_unlock();
209 return NULL;
210}
211
NeilBrownab6085c2007-05-23 13:58:10 -0700212static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
NeilBrowna654b9d82005-06-21 17:17:27 -0700213{
NeilBrown46533ff2016-11-18 16:16:11 +1100214 struct md_rdev *rdev;
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +1100215 struct block_device *bdev;
NeilBrownfd01b882011-10-11 16:47:53 +1100216 struct mddev *mddev = bitmap->mddev;
NeilBrown1ec885c2012-05-22 13:55:10 +1000217 struct bitmap_storage *store = &bitmap->storage;
NeilBrowna654b9d82005-06-21 17:17:27 -0700218
NeilBrown46533ff2016-11-18 16:16:11 +1100219restart:
220 rdev = NULL;
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000221 while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
NeilBrownac2f40b2010-06-01 19:37:31 +1000222 int size = PAGE_SIZE;
223 loff_t offset = mddev->bitmap_info.offset;
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +1100224
225 bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
226
NeilBrown9b1215c2012-05-22 13:55:11 +1000227 if (page->index == store->file_pages-1) {
228 int last_page_size = store->bytes & (PAGE_SIZE-1);
229 if (last_page_size == 0)
230 last_page_size = PAGE_SIZE;
231 size = roundup(last_page_size,
Jonathan Brassowa6ff7e02011-01-14 09:14:34 +1100232 bdev_logical_block_size(bdev));
NeilBrown9b1215c2012-05-22 13:55:11 +1000233 }
NeilBrownac2f40b2010-06-01 19:37:31 +1000234 /* Just make sure we aren't corrupting data or
235 * metadata
236 */
237 if (mddev->external) {
238 /* Bitmap could be anywhere. */
239 if (rdev->sb_start + offset + (page->index
240 * (PAGE_SIZE/512))
241 > rdev->data_offset
242 &&
243 rdev->sb_start + offset
244 < (rdev->data_offset + mddev->dev_sectors
245 + (PAGE_SIZE/512)))
246 goto bad_alignment;
247 } else if (offset < 0) {
248 /* DATA BITMAP METADATA */
249 if (offset
250 + (long)(page->index * (PAGE_SIZE/512))
251 + size/512 > 0)
252 /* bitmap runs in to metadata */
253 goto bad_alignment;
254 if (rdev->data_offset + mddev->dev_sectors
255 > rdev->sb_start + offset)
256 /* data runs in to bitmap */
257 goto bad_alignment;
258 } else if (rdev->sb_start < rdev->data_offset) {
259 /* METADATA BITMAP DATA */
260 if (rdev->sb_start
261 + offset
262 + page->index*(PAGE_SIZE/512) + size/512
263 > rdev->data_offset)
264 /* bitmap runs in to data */
265 goto bad_alignment;
266 } else {
267 /* DATA METADATA BITMAP - no problems */
268 }
269 md_super_write(mddev, rdev,
270 rdev->sb_start + offset
271 + page->index * (PAGE_SIZE/512),
272 size,
273 page);
NeilBrownb2d2c4c2008-09-01 12:48:13 +1000274 }
NeilBrowna654b9d82005-06-21 17:17:27 -0700275
NeilBrown46533ff2016-11-18 16:16:11 +1100276 if (wait && md_super_wait(mddev) < 0)
277 goto restart;
NeilBrowna654b9d82005-06-21 17:17:27 -0700278 return 0;
NeilBrown4b809912008-07-21 17:05:25 +1000279
280 bad_alignment:
NeilBrown4b809912008-07-21 17:05:25 +1000281 return -EINVAL;
NeilBrowna654b9d82005-06-21 17:17:27 -0700282}
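
/*
 * Rough on-disk layout sketch for the alignment checks above (a
 * simplification; the exact geometry depends on the metadata version):
 *
 *   superblock at the end of the device:   | data ....... | bitmap | sb |
 *   superblock at the start of the device: | sb | bitmap | data ....... |
 *
 * In every case write_sb_page() refuses the write (-EINVAL) if the bitmap
 * page being written would overlap either the data area or the metadata.
 */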
283
Andy Shevchenkoe64e40182018-08-01 15:20:50 -0700284static void md_bitmap_file_kick(struct bitmap *bitmap);
NeilBrown32a76272005-06-21 17:17:14 -0700285/*
NeilBrowna654b9d82005-06-21 17:17:27 -0700286 * write out a page to a file
NeilBrown32a76272005-06-21 17:17:14 -0700287 */
NeilBrown4ad13662007-07-17 04:06:13 -0700288static void write_page(struct bitmap *bitmap, struct page *page, int wait)
NeilBrown32a76272005-06-21 17:17:14 -0700289{
NeilBrownd785a062006-06-26 00:27:48 -0700290 struct buffer_head *bh;
NeilBrown32a76272005-06-21 17:17:14 -0700291
NeilBrown1ec885c2012-05-22 13:55:10 +1000292 if (bitmap->storage.file == NULL) {
NeilBrownf0d76d72007-07-17 04:06:12 -0700293 switch (write_sb_page(bitmap, page, wait)) {
294 case -EINVAL:
NeilBrownb405fe92012-05-22 13:55:15 +1000295 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
NeilBrownf0d76d72007-07-17 04:06:12 -0700296 }
NeilBrown4ad13662007-07-17 04:06:13 -0700297 } else {
NeilBrowna654b9d82005-06-21 17:17:27 -0700298
NeilBrown4ad13662007-07-17 04:06:13 -0700299 bh = page_buffers(page);
NeilBrownc7084432006-01-06 00:20:45 -0800300
NeilBrown4ad13662007-07-17 04:06:13 -0700301 while (bh && bh->b_blocknr) {
302 atomic_inc(&bitmap->pending_writes);
303 set_buffer_locked(bh);
304 set_buffer_mapped(bh);
Mike Christie2a222ca2016-06-05 14:31:43 -0500305 submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
NeilBrown4ad13662007-07-17 04:06:13 -0700306 bh = bh->b_this_page;
307 }
NeilBrown32a76272005-06-21 17:17:14 -0700308
NeilBrownac2f40b2010-06-01 19:37:31 +1000309 if (wait)
NeilBrown4ad13662007-07-17 04:06:13 -0700310 wait_event(bitmap->write_wait,
311 atomic_read(&bitmap->pending_writes)==0);
NeilBrown32a76272005-06-21 17:17:14 -0700312 }
NeilBrownb405fe92012-05-22 13:55:15 +1000313 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
Andy Shevchenkoe64e40182018-08-01 15:20:50 -0700314 md_bitmap_file_kick(bitmap);
NeilBrown32a76272005-06-21 17:17:14 -0700315}
316
NeilBrownd785a062006-06-26 00:27:48 -0700317static void end_bitmap_write(struct buffer_head *bh, int uptodate)
NeilBrown32a76272005-06-21 17:17:14 -0700318{
NeilBrownd785a062006-06-26 00:27:48 -0700319 struct bitmap *bitmap = bh->b_private;
NeilBrownd785a062006-06-26 00:27:48 -0700320
NeilBrownb405fe92012-05-22 13:55:15 +1000321 if (!uptodate)
322 set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
NeilBrownd785a062006-06-26 00:27:48 -0700323 if (atomic_dec_and_test(&bitmap->pending_writes))
324 wake_up(&bitmap->write_wait);
325}
326
NeilBrownd785a062006-06-26 00:27:48 -0700327static void free_buffers(struct page *page)
328{
NeilBrown27581e52012-05-22 13:55:08 +1000329 struct buffer_head *bh;
NeilBrownd785a062006-06-26 00:27:48 -0700330
NeilBrown27581e52012-05-22 13:55:08 +1000331 if (!PagePrivate(page))
332 return;
333
334 bh = page_buffers(page);
NeilBrownd785a062006-06-26 00:27:48 -0700335 while (bh) {
336 struct buffer_head *next = bh->b_this_page;
337 free_buffer_head(bh);
338 bh = next;
339 }
Guoqing Jiangdb2c1d82020-06-01 21:47:42 -0700340 detach_page_private(page);
NeilBrownd785a062006-06-26 00:27:48 -0700341 put_page(page);
342}
343
344/* read a page from a file.
345 * We both read the page, and attach buffers to the page to record the
346 * address of each block (using bmap). These addresses will be used
347 * to write the block later, completely bypassing the filesystem.
348 * This usage is similar to how swap files are handled, and allows us
349 * to write to a file with no concerns of memory allocation failing.
350 */
NeilBrown27581e52012-05-22 13:55:08 +1000351static int read_page(struct file *file, unsigned long index,
352 struct bitmap *bitmap,
353 unsigned long count,
354 struct page *page)
NeilBrownd785a062006-06-26 00:27:48 -0700355{
NeilBrown27581e52012-05-22 13:55:08 +1000356 int ret = 0;
Al Viro496ad9a2013-01-23 17:07:38 -0500357 struct inode *inode = file_inode(file);
NeilBrownd785a062006-06-26 00:27:48 -0700358 struct buffer_head *bh;
Carlos Maiolino30460e12020-01-09 14:30:41 +0100359 sector_t block, blk_cur;
Xianting Tian313b8252020-08-18 13:42:06 +0800360 unsigned long blocksize = i_blocksize(inode);
NeilBrown32a76272005-06-21 17:17:14 -0700361
NeilBrown36a4e1f2011-10-07 14:23:17 +1100362 pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
363 (unsigned long long)index << PAGE_SHIFT);
NeilBrown32a76272005-06-21 17:17:14 -0700364
Xianting Tian313b8252020-08-18 13:42:06 +0800365 bh = alloc_page_buffers(page, blocksize, false);
NeilBrownd785a062006-06-26 00:27:48 -0700366 if (!bh) {
NeilBrown27581e52012-05-22 13:55:08 +1000367 ret = -ENOMEM;
NeilBrownd785a062006-06-26 00:27:48 -0700368 goto out;
369 }
Guoqing Jiangdb2c1d82020-06-01 21:47:42 -0700370 attach_page_private(page, bh);
Carlos Maiolino30460e12020-01-09 14:30:41 +0100371 blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
NeilBrownd785a062006-06-26 00:27:48 -0700372 while (bh) {
Carlos Maiolino30460e12020-01-09 14:30:41 +0100373 block = blk_cur;
374
NeilBrownd785a062006-06-26 00:27:48 -0700375 if (count == 0)
376 bh->b_blocknr = 0;
377 else {
Carlos Maiolino30460e12020-01-09 14:30:41 +0100378 ret = bmap(inode, &block);
379 if (ret || !block) {
NeilBrown27581e52012-05-22 13:55:08 +1000380 ret = -EINVAL;
Carlos Maiolino30460e12020-01-09 14:30:41 +0100381 bh->b_blocknr = 0;
NeilBrownd785a062006-06-26 00:27:48 -0700382 goto out;
383 }
Carlos Maiolino30460e12020-01-09 14:30:41 +0100384
385 bh->b_blocknr = block;
NeilBrownd785a062006-06-26 00:27:48 -0700386 bh->b_bdev = inode->i_sb->s_bdev;
Xianting Tian313b8252020-08-18 13:42:06 +0800387 if (count < blocksize)
NeilBrownd785a062006-06-26 00:27:48 -0700388 count = 0;
389 else
Xianting Tian313b8252020-08-18 13:42:06 +0800390 count -= blocksize;
NeilBrown32a76272005-06-21 17:17:14 -0700391
NeilBrownd785a062006-06-26 00:27:48 -0700392 bh->b_end_io = end_bitmap_write;
393 bh->b_private = bitmap;
NeilBrownce25c312006-06-26 00:27:49 -0700394 atomic_inc(&bitmap->pending_writes);
395 set_buffer_locked(bh);
396 set_buffer_mapped(bh);
Mike Christie2a222ca2016-06-05 14:31:43 -0500397 submit_bh(REQ_OP_READ, 0, bh);
NeilBrownd785a062006-06-26 00:27:48 -0700398 }
Carlos Maiolino30460e12020-01-09 14:30:41 +0100399 blk_cur++;
NeilBrownd785a062006-06-26 00:27:48 -0700400 bh = bh->b_this_page;
401 }
NeilBrownd785a062006-06-26 00:27:48 -0700402 page->index = index;
NeilBrownce25c312006-06-26 00:27:49 -0700403
404 wait_event(bitmap->write_wait,
405 atomic_read(&bitmap->pending_writes)==0);
NeilBrownb405fe92012-05-22 13:55:15 +1000406 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
NeilBrown27581e52012-05-22 13:55:08 +1000407 ret = -EIO;
NeilBrown32a76272005-06-21 17:17:14 -0700408out:
NeilBrown27581e52012-05-22 13:55:08 +1000409 if (ret)
NeilBrownec0cc222016-11-02 14:16:49 +1100410 pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
411 (int)PAGE_SIZE,
412 (unsigned long long)index << PAGE_SHIFT,
413 ret);
NeilBrown27581e52012-05-22 13:55:08 +1000414 return ret;
NeilBrown32a76272005-06-21 17:17:14 -0700415}
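
/*
 * Example of the buffer_head bookkeeping above (a sketch, assuming the
 * bitmap file lives on a filesystem with 1 KiB blocks and 4 KiB pages):
 * read_page() attaches four buffer_heads to the page and fills each
 * b_blocknr from bmap(), so a later write_page() can submit those four
 * blocks directly to the underlying device, bypassing the filesystem.
 */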

/*
 * bitmap file superblock operations
 */

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
	if (bitmap->storage.file)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
		/* Note that we ignore the return value.  The writes
		 * might have failed, but that would just mean that
		 * some bits which should be cleared haven't been,
		 * which is safe.  The relevant bitmap blocks will
		 * probably get written again, but there is no great
		 * loss if they aren't.
		 */
		md_super_wait(bitmap->mddev);
}


/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
		return;
	if (bitmap->mddev->bitmap_info.external)
		return;
	if (!bitmap->storage.sb_page) /* no superblock */
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	sb->events = cpu_to_le64(bitmap->mddev->events);
	if (bitmap->mddev->events < bitmap->events_cleared)
		/* rocking back to read-only */
		bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
	/*
	 * clear BITMAP_WRITE_ERROR bit to protect against the case that
	 * a bitmap write error occurred but the later writes succeeded.
	 */
	sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
	/* Just in case these have been changed via sysfs: */
	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
	kunmap_atomic(sb);
	write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);

/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;

	if (!bitmap || !bitmap->storage.sb_page)
		return;
	sb = kmap_atomic(bitmap->storage.sb_page);
	pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
	pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
	pr_debug("       version: %d\n", le32_to_cpu(sb->version));
	pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
		 le32_to_cpu(*(__le32 *)(sb->uuid+0)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+4)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+8)),
		 le32_to_cpu(*(__le32 *)(sb->uuid+12)));
	pr_debug("        events: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events));
	pr_debug("events cleared: %llu\n",
		 (unsigned long long) le64_to_cpu(sb->events_cleared));
	pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
	pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
	pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
	pr_debug("     sync size: %llu KB\n",
		 (unsigned long long)le64_to_cpu(sb->sync_size)/2);
	pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
	kunmap_atomic(sb);
}

/*
 * bitmap_new_disk_sb
 * @bitmap
 *
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;

	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bitmap->storage.sb_page == NULL)
		return -ENOMEM;
	bitmap->storage.sb_page->index = 0;

	sb = kmap_atomic(bitmap->storage.sb_page);

	sb->magic = cpu_to_le32(BITMAP_MAGIC);
	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

	chunksize = bitmap->mddev->bitmap_info.chunksize;
	BUG_ON(!chunksize);
	if (!is_power_of_2(chunksize)) {
		kunmap_atomic(sb);
		pr_warn("bitmap chunksize not a power of 2\n");
		return -EINVAL;
	}
	sb->chunksize = cpu_to_le32(chunksize);

	daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
	if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
		pr_debug("Choosing daemon_sleep default (5 sec)\n");
		daemon_sleep = 5 * HZ;
	}
	sb->daemon_sleep = cpu_to_le32(daemon_sleep);
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

	/*
	 * FIXME: write_behind for RAID1.  If not specified, what
	 * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
	 */
	write_behind = bitmap->mddev->bitmap_info.max_write_behind;
	if (write_behind > COUNTER_MAX)
		write_behind = COUNTER_MAX / 2;
	sb->write_behind = cpu_to_le32(write_behind);
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	memcpy(sb->uuid, bitmap->mddev->uuid, 16);

	set_bit(BITMAP_STALE, &bitmap->flags);
	sb->state = cpu_to_le32(bitmap->flags);
	bitmap->events_cleared = bitmap->mddev->events;
	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
	bitmap->mddev->bitmap_info.nodes = 0;

	kunmap_atomic(sb);

	return 0;
}

/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
	char *reason = NULL;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	struct page *sb_page;
	loff_t offset = bitmap->mddev->bitmap_info.offset;

	if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
		chunksize = 128 * 1024 * 1024;
		daemon_sleep = 5 * HZ;
		write_behind = 0;
		set_bit(BITMAP_STALE, &bitmap->flags);
		err = 0;
		goto out_no_sb;
	}
	/* page 0 is the superblock, read it... */
	sb_page = alloc_page(GFP_KERNEL);
	if (!sb_page)
		return -ENOMEM;
	bitmap->storage.sb_page = sb_page;

re_read:
	/* If cluster_slot is set, the cluster is setup */
	if (bitmap->cluster_slot >= 0) {
		sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks,
			   (bitmap->mddev->bitmap_info.chunksize >> 9));
		/* bits to bytes */
		bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
		/* to 4k blocks */
		bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
		offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
		pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
			bitmap->cluster_slot, offset);
	}

	if (bitmap->storage.file) {
		loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
		int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

		err = read_page(bitmap->storage.file, 0,
				bitmap, bytes, sb_page);
	} else {
		err = read_sb_page(bitmap->mddev,
				   offset,
				   sb_page,
				   0, sizeof(bitmap_super_t));
	}
	if (err)
		return err;

	err = -EINVAL;
	sb = kmap_atomic(sb_page);

	chunksize = le32_to_cpu(sb->chunksize);
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	/* Setup nodes/clustername only if bitmap version is
	 * cluster-compatible
	 */
	if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
		nodes = le32_to_cpu(sb->nodes);
		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
				sb->cluster_name, 64);
	}

	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
		reason = "bad magic";
	else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
		 le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
		reason = "unrecognized superblock version";
	else if (chunksize < 512)
		reason = "bitmap chunksize too small";
	else if (!is_power_of_2(chunksize))
		reason = "bitmap chunksize not a power of 2";
	else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
		reason = "daemon sleep period out of range";
	else if (write_behind > COUNTER_MAX)
		reason = "write-behind limit out of range (0 - 16383)";
	if (reason) {
		pr_warn("%s: invalid bitmap file superblock: %s\n",
			bmname(bitmap), reason);
		goto out;
	}

	/* keep the array size field of the bitmap superblock up to date */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

	if (bitmap->mddev->persistent) {
		/*
		 * We have a persistent array superblock, so compare the
		 * bitmap's UUID and event counter to the mddev's
		 */
		if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
			pr_warn("%s: bitmap superblock UUID mismatch\n",
				bmname(bitmap));
			goto out;
		}
		events = le64_to_cpu(sb->events);
		if (!nodes && (events < bitmap->mddev->events)) {
			pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
				bmname(bitmap), events,
				(unsigned long long) bitmap->mddev->events);
			set_bit(BITMAP_STALE, &bitmap->flags);
		}
	}

	/* assign fields using values from superblock */
	bitmap->flags |= le32_to_cpu(sb->state);
	if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
		set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
	bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
	err = 0;

out:
	kunmap_atomic(sb);
	/* Assigning chunksize is required for "re_read" */
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
		err = md_setup_cluster(bitmap->mddev, nodes);
		if (err) {
			pr_warn("%s: Could not setup cluster service (%d)\n",
				bmname(bitmap), err);
			goto out_no_sb;
		}
		bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
		goto re_read;
	}


out_no_sb:
	if (test_bit(BITMAP_STALE, &bitmap->flags))
		bitmap->events_cleared = bitmap->mddev->events;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
	if (err) {
		md_bitmap_print_sb(bitmap);
		if (bitmap->cluster_slot < 0)
			md_cluster_stop(bitmap->mddev);
	}
	return err;
}

/*
 * general bitmap file operations
 */

/*
 * on-disk bitmap:
 *
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */
/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
					     unsigned long chunk)
{
	if (store->sb_page)
		chunk += sizeof(bitmap_super_t) << 3;
	return chunk & (PAGE_BITS - 1);
}
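
/*
 * Worked example for the two helpers above (a sketch, assuming 4 KiB
 * pages and the 256-byte bitmap_super_t): when the superblock shares
 * the file (store->sb_page is set), every chunk number is shifted up
 * by 256*8 = 2048 bits.  Chunk 40000 then maps to bit 42048, i.e.
 * page 42048 >> 15 = 1 with bit offset 42048 % 32768 = 9280.
 */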

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
					    unsigned long chunk)
{
	if (file_page_index(store, chunk) >= store->file_pages)
		return NULL;
	return store->filemap[file_page_index(store, chunk)];
}

static int md_bitmap_storage_alloc(struct bitmap_storage *store,
				   unsigned long chunks, int with_super,
				   int slot_number)
{
	int pnum, offset = 0;
	unsigned long num_pages;
	unsigned long bytes;

	bytes = DIV_ROUND_UP(chunks, 8);
	if (with_super)
		bytes += sizeof(bitmap_super_t);

	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
	offset = slot_number * num_pages;

	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
				       GFP_KERNEL);
	if (!store->filemap)
		return -ENOMEM;

	if (with_super && !store->sb_page) {
		store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (store->sb_page == NULL)
			return -ENOMEM;
	}

	pnum = 0;
	if (store->sb_page) {
		store->filemap[0] = store->sb_page;
		pnum = 1;
		store->sb_page->index = offset;
	}

	for ( ; pnum < num_pages; pnum++) {
		store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!store->filemap[pnum]) {
			store->file_pages = pnum;
			return -ENOMEM;
		}
		store->filemap[pnum]->index = pnum + offset;
	}
	store->file_pages = pnum;

	/* We need 4 bits per page, rounded up to a multiple
	 * of sizeof(unsigned long) */
	store->filemap_attr = kzalloc(
		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
		GFP_KERNEL);
	if (!store->filemap_attr)
		return -ENOMEM;

	store->bytes = bytes;

	return 0;
}
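
/*
 * Sizing example for md_bitmap_storage_alloc() (a sketch, assuming 4 KiB
 * pages on a 64-bit build): chunks = 100000 with with_super = 1 gives
 * bytes = 12500 + 256 = 12756, num_pages = DIV_ROUND_UP(12756, 4096) = 4,
 * and filemap_attr needs roundup(DIV_ROUND_UP(4 * 4, 8), 8) = 8 bytes
 * for the per-page attribute flags.
 */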

static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
	struct page **map, *sb_page;
	int pages;
	struct file *file;

	file = store->file;
	map = store->filemap;
	pages = store->file_pages;
	sb_page = store->sb_page;

	while (pages--)
		if (map[pages] != sb_page) /* 0 is sb_page, release it below */
			free_buffers(map[pages]);
	kfree(map);
	kfree(store->filemap_attr);

	if (sb_page)
		free_buffers(sb_page);

	if (file) {
		struct inode *inode = file_inode(file);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		fput(file);
	}
}

/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
	char *path, *ptr = NULL;

	if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
		md_bitmap_update_sb(bitmap);

		if (bitmap->storage.file) {
			path = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (path)
				ptr = file_path(bitmap->storage.file,
					     path, PAGE_SIZE);

			pr_warn("%s: kicking failed bitmap file %s from array!\n",
				bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

			kfree(path);
		} else
			pr_warn("%s: disabling internal bitmap due to errors\n",
				bmname(bitmap));
	}
}

enum bitmap_page_attr {
	BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
	BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
				    * i.e. counter is 1 or 2. */
	BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};

static inline void set_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
				   enum bitmap_page_attr attr)
{
	clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
				 enum bitmap_page_attr attr)
{
	return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
					   enum bitmap_page_attr attr)
{
	return test_and_clear_bit((pnum<<2) + attr,
				  bitmap->storage.filemap_attr);
}
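
/*
 * The attribute bits are packed four per filemap page: flag 'attr' of
 * page 'pnum' lives at bit (pnum << 2) + attr of filemap_attr.  For
 * example, BITMAP_PAGE_NEEDWRITE (2) of page 3 is bit 14, so the
 * attributes of up to 16 pages fit in a single 64-bit word.
 */
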
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *kaddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);

	/* set the bit */
	kaddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set_bit(bit, kaddr);
	else
		set_bit_le(bit, kaddr);
	kunmap_atomic(kaddr);
	pr_debug("set file bit %lu page %lu\n", bit, page->index);
	/* record page number so it gets flushed to disk when unplug occurs */
	set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}

static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	struct bitmap_storage *store = &bitmap->storage;
	unsigned long node_offset = 0;

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * store->file_pages;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		clear_bit(bit, paddr);
	else
		clear_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
		set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
		bitmap->allclean = 0;
	}
}

static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
	unsigned long bit;
	struct page *page;
	void *paddr;
	unsigned long chunk = block >> bitmap->counts.chunkshift;
	int set = 0;

	page = filemap_get_page(&bitmap->storage, chunk);
	if (!page)
		return -EINVAL;
	bit = file_page_offset(&bitmap->storage, chunk);
	paddr = kmap_atomic(page);
	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
		set = test_bit(bit, paddr);
	else
		set = test_bit_le(bit, paddr);
	kunmap_atomic(paddr);
	return set;
}
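
/*
 * Dirty/clean flow for the helpers above (a sketch of the intended
 * lifecycle): md_bitmap_file_set_bit() marks the page BITMAP_PAGE_DIRTY
 * so md_bitmap_unplug() flushes it before the data write goes down,
 * while md_bitmap_file_clear_bit() only marks the page
 * BITMAP_PAGE_PENDING; md_bitmap_daemon_work() (later in this file) is
 * expected to promote that to BITMAP_PAGE_NEEDWRITE so cleared bits
 * reach disk lazily.
 */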


/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void md_bitmap_unplug(struct bitmap *bitmap)
{
	unsigned long i;
	int dirty, need_write;
	int writing = 0;

	if (!bitmap || !bitmap->storage.filemap ||
	    test_bit(BITMAP_STALE, &bitmap->flags))
		return;

	/* look at each page to see if there are any set bits that need to be
	 * flushed out to disk */
	for (i = 0; i < bitmap->storage.file_pages; i++) {
		dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
		need_write = test_and_clear_page_attr(bitmap, i,
						      BITMAP_PAGE_NEEDWRITE);
		if (dirty || need_write) {
			if (!writing) {
				md_bitmap_wait_writes(bitmap);
				if (bitmap->mddev->queue)
					blk_add_trace_msg(bitmap->mddev->queue,
							  "md bitmap_unplug");
			}
			clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
			write_page(bitmap, bitmap->storage.filemap[i], 0);
			writing = 1;
		}
	}
	if (writing)
		md_bitmap_wait_writes(bitmap);

	if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
		md_bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(md_bitmap_unplug);

static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/* * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 * Special cases:
 *   if there's no bitmap file, or if the bitmap file had been
 *   previously kicked from the array, we mark all the bits as
 *   1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
	unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
	struct page *page = NULL;
	unsigned long bit_cnt = 0;
	struct file *file;
	unsigned long offset;
	int outofdate;
	int ret = -ENOSPC;
	void *paddr;
	struct bitmap_storage *store = &bitmap->storage;

	chunks = bitmap->counts.chunks;
	file = store->file;

	if (!file && !bitmap->mddev->bitmap_info.offset) {
		/* No permanent bitmap - fill with '1s'. */
		store->filemap = NULL;
		store->file_pages = 0;
		for (i = 0; i < chunks ; i++) {
			/* if the disk bit is set, set the memory bit */
			int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
				      >= start);
			md_bitmap_set_memory_bits(bitmap,
						  (sector_t)i << bitmap->counts.chunkshift,
						  needed);
		}
		return 0;
	}

	outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
	if (outofdate)
		pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));

	if (file && i_size_read(file->f_mapping->host) < store->bytes) {
		pr_warn("%s: bitmap file too short %lu < %lu\n",
			bmname(bitmap),
			(unsigned long) i_size_read(file->f_mapping->host),
			store->bytes);
		goto err;
	}

	oldindex = ~0L;
	offset = 0;
	if (!bitmap->mddev->bitmap_info.external)
		offset = sizeof(bitmap_super_t);

	if (mddev_is_clustered(bitmap->mddev))
		node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

	for (i = 0; i < chunks; i++) {
		int b;
		index = file_page_index(&bitmap->storage, i);
		bit = file_page_offset(&bitmap->storage, i);
		if (index != oldindex) { /* this is a new page, read it in */
			int count;
			/* unmap the old page, we're done with it */
			if (index == store->file_pages-1)
				count = store->bytes - index * PAGE_SIZE;
			else
				count = PAGE_SIZE;
			page = store->filemap[index];
			if (file)
				ret = read_page(file, index, bitmap,
						count, page);
			else
				ret = read_sb_page(
					bitmap->mddev,
					bitmap->mddev->bitmap_info.offset,
					page,
					index + node_offset, count);

			if (ret)
				goto err;

			oldindex = index;

			if (outofdate) {
				/*
				 * if bitmap is out of date, dirty the
				 * whole page and write it out
				 */
				paddr = kmap_atomic(page);
				memset(paddr + offset, 0xff,
				       PAGE_SIZE - offset);
				kunmap_atomic(paddr);
				write_page(bitmap, page, 1);

				ret = -EIO;
				if (test_bit(BITMAP_WRITE_ERROR,
					     &bitmap->flags))
					goto err;
NeilBrown32a76272005-06-21 17:17:14 -07001146 }
NeilBrown32a76272005-06-21 17:17:14 -07001147 }
Cong Wangb2f46e62011-11-28 13:25:44 +08001148 paddr = kmap_atomic(page);
NeilBrownb405fe92012-05-22 13:55:15 +10001149 if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
NeilBrownea03aff2006-01-06 00:20:34 -08001150 b = test_bit(bit, paddr);
NeilBrownbd926c62005-11-08 21:39:32 -08001151 else
Akinobu Mita6b33aff2011-03-23 16:42:13 -07001152 b = test_bit_le(bit, paddr);
Cong Wangb2f46e62011-11-28 13:25:44 +08001153 kunmap_atomic(paddr);
NeilBrownbd926c62005-11-08 21:39:32 -08001154 if (b) {
NeilBrown32a76272005-06-21 17:17:14 -07001155 /* if the disk bit is set, set the memory bit */
NeilBrown40cffcc2012-05-22 13:55:24 +10001156 int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
NeilBrowndb305e52009-05-07 12:49:06 +10001157 >= start);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001158 md_bitmap_set_memory_bits(bitmap,
1159 (sector_t)i << bitmap->counts.chunkshift,
1160 needed);
NeilBrown32a76272005-06-21 17:17:14 -07001161 bit_cnt++;
1162 }
NeilBrown27581e52012-05-22 13:55:08 +10001163 offset = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001164 }
1165
NeilBrownec0cc222016-11-02 14:16:49 +11001166 pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
1167 bmname(bitmap), store->file_pages,
1168 bit_cnt, chunks);
NeilBrown32a76272005-06-21 17:17:14 -07001169
NeilBrown4ad13662007-07-17 04:06:13 -07001170 return 0;
1171
1172 err:
NeilBrownec0cc222016-11-02 14:16:49 +11001173 pr_warn("%s: bitmap initialisation failed: %d\n",
1174 bmname(bitmap), ret);
NeilBrown32a76272005-06-21 17:17:14 -07001175 return ret;
1176}
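/*
 * Worked example of the 'start' cut-off used above, assuming 64MiB
 * chunks (counts.chunkshift == 17, i.e. 131072 sectors per chunk; the
 * real value comes from mddev->bitmap_info.chunksize): with
 * start == 1000000, chunk i is passed needed == 1 only when
 * (i + 1) << 17 >= start.  Chunks 0..6 end at sector 917504 or earlier,
 * so any set disk bits there are loaded without NEEDED_MASK, while
 * chunk 7 onward (ending at sector 1048576 or later) are flagged as
 * needing resync when their on-disk bit is set.
 */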
1177
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001178void md_bitmap_write_all(struct bitmap *bitmap)
NeilBrowna654b9d82005-06-21 17:17:27 -07001179{
1180 /* We don't actually write all bitmap blocks here,
1181 * just flag them as needing to be written
1182 */
NeilBrownec7a3192006-06-26 00:27:45 -07001183 int i;
NeilBrowna654b9d82005-06-21 17:17:27 -07001184
NeilBrown1ec885c2012-05-22 13:55:10 +10001185 if (!bitmap || !bitmap->storage.filemap)
NeilBrownef99bf42012-05-22 13:55:08 +10001186 return;
NeilBrown1ec885c2012-05-22 13:55:10 +10001187 if (bitmap->storage.file)
NeilBrownef99bf42012-05-22 13:55:08 +10001188 /* Only one copy, so nothing needed */
1189 return;
1190
NeilBrown1ec885c2012-05-22 13:55:10 +10001191 for (i = 0; i < bitmap->storage.file_pages; i++)
NeilBrownd1891222012-05-22 13:55:09 +10001192 set_page_attr(bitmap, i,
NeilBrownec7a3192006-06-26 00:27:45 -07001193 BITMAP_PAGE_NEEDWRITE);
NeilBrown2585f3e2011-09-21 15:37:46 +10001194 bitmap->allclean = 0;
NeilBrowna654b9d82005-06-21 17:17:27 -07001195}
1196
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001197static void md_bitmap_count_page(struct bitmap_counts *bitmap,
1198 sector_t offset, int inc)
NeilBrown32a76272005-06-21 17:17:14 -07001199{
NeilBrown61a0d802012-03-19 12:46:41 +11001200 sector_t chunk = offset >> bitmap->chunkshift;
NeilBrown32a76272005-06-21 17:17:14 -07001201 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1202 bitmap->bp[page].count += inc;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001203 md_bitmap_checkfree(bitmap, page);
NeilBrown32a76272005-06-21 17:17:14 -07001204}
NeilBrownbf07bb72012-05-22 13:55:06 +10001205
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001206static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
NeilBrownbf07bb72012-05-22 13:55:06 +10001207{
1208 sector_t chunk = offset >> bitmap->chunkshift;
1209 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1210 struct bitmap_page *bp = &bitmap->bp[page];
1211
1212 if (!bp->pending)
1213 bp->pending = 1;
1214}
1215
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001216static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1217 sector_t offset, sector_t *blocks,
1218 int create);
NeilBrown32a76272005-06-21 17:17:14 -07001219
1220/*
1221 * bitmap daemon -- periodically wakes up to clean bits and flush pages
1222 * out to disk
1223 */
1224
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001225void md_bitmap_daemon_work(struct mddev *mddev)
NeilBrown32a76272005-06-21 17:17:14 -07001226{
NeilBrownaa5cbd12009-12-14 12:49:46 +11001227 struct bitmap *bitmap;
NeilBrownaa3163f2005-06-21 17:17:22 -07001228 unsigned long j;
NeilBrownbf07bb72012-05-22 13:55:06 +10001229 unsigned long nextpage;
NeilBrown57dab0b2010-10-19 10:03:39 +11001230 sector_t blocks;
NeilBrown40cffcc2012-05-22 13:55:24 +10001231 struct bitmap_counts *counts;
NeilBrown32a76272005-06-21 17:17:14 -07001232
NeilBrownaa5cbd12009-12-14 12:49:46 +11001233 /* Use a mutex to guard daemon_work against
1234 * bitmap_destroy.
1235 */
NeilBrownc3d97142009-12-14 12:49:52 +11001236 mutex_lock(&mddev->bitmap_info.mutex);
NeilBrownaa5cbd12009-12-14 12:49:46 +11001237 bitmap = mddev->bitmap;
1238 if (bitmap == NULL) {
NeilBrownc3d97142009-12-14 12:49:52 +11001239 mutex_unlock(&mddev->bitmap_info.mutex);
NeilBrown4ad13662007-07-17 04:06:13 -07001240 return;
NeilBrownaa5cbd12009-12-14 12:49:46 +11001241 }
NeilBrown42a04b52009-12-14 12:49:53 +11001242 if (time_before(jiffies, bitmap->daemon_lastrun
NeilBrown2e61ebb2011-12-23 10:17:50 +11001243 + mddev->bitmap_info.daemon_sleep))
NeilBrown7be3dfe2008-03-10 11:43:48 -07001244 goto done;
1245
NeilBrown32a76272005-06-21 17:17:14 -07001246 bitmap->daemon_lastrun = jiffies;
NeilBrown8311c292008-03-04 14:29:30 -08001247 if (bitmap->allclean) {
NeilBrown2e61ebb2011-12-23 10:17:50 +11001248 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrownaa5cbd12009-12-14 12:49:46 +11001249 goto done;
NeilBrown8311c292008-03-04 14:29:30 -08001250 }
1251 bitmap->allclean = 1;
NeilBrown32a76272005-06-21 17:17:14 -07001252
NeilBrown581dbd92016-11-14 16:30:21 +11001253 if (bitmap->mddev->queue)
1254 blk_add_trace_msg(bitmap->mddev->queue,
1255 "md bitmap_daemon_work");
1256
NeilBrownbf07bb72012-05-22 13:55:06 +10001257 /* Any file-page which is PENDING now needs to be written.
1258 * So set NEEDWRITE now, then after we make any last-minute changes
1259 * we will write it.
1260 */
NeilBrown1ec885c2012-05-22 13:55:10 +10001261 for (j = 0; j < bitmap->storage.file_pages; j++)
NeilBrownbdfd1142012-05-22 13:55:22 +10001262 if (test_and_clear_page_attr(bitmap, j,
1263 BITMAP_PAGE_PENDING))
NeilBrownd1891222012-05-22 13:55:09 +10001264 set_page_attr(bitmap, j,
NeilBrownbf07bb72012-05-22 13:55:06 +10001265 BITMAP_PAGE_NEEDWRITE);
NeilBrownbf07bb72012-05-22 13:55:06 +10001266
1267 if (bitmap->need_sync &&
1268 mddev->bitmap_info.external == 0) {
1269 /* Arrange for superblock update as well as
1270 * other changes */
1271 bitmap_super_t *sb;
1272 bitmap->need_sync = 0;
NeilBrown1ec885c2012-05-22 13:55:10 +10001273 if (bitmap->storage.filemap) {
1274 sb = kmap_atomic(bitmap->storage.sb_page);
NeilBrownef99bf42012-05-22 13:55:08 +10001275 sb->events_cleared =
1276 cpu_to_le64(bitmap->events_cleared);
1277 kunmap_atomic(sb);
NeilBrownd1891222012-05-22 13:55:09 +10001278 set_page_attr(bitmap, 0,
NeilBrownef99bf42012-05-22 13:55:08 +10001279 BITMAP_PAGE_NEEDWRITE);
1280 }
NeilBrownbf07bb72012-05-22 13:55:06 +10001281 }
1282 /* Now look at the bitmap counters and if any are '2' or '1',
1283 * decrement and handle accordingly.
1284 */
NeilBrown40cffcc2012-05-22 13:55:24 +10001285 counts = &bitmap->counts;
1286 spin_lock_irq(&counts->lock);
NeilBrownbf07bb72012-05-22 13:55:06 +10001287 nextpage = 0;
NeilBrown40cffcc2012-05-22 13:55:24 +10001288 for (j = 0; j < counts->chunks; j++) {
NeilBrown32a76272005-06-21 17:17:14 -07001289 bitmap_counter_t *bmc;
NeilBrown40cffcc2012-05-22 13:55:24 +10001290 sector_t block = (sector_t)j << counts->chunkshift;
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001291
NeilBrownbf07bb72012-05-22 13:55:06 +10001292 if (j == nextpage) {
1293 nextpage += PAGE_COUNTER_RATIO;
NeilBrown40cffcc2012-05-22 13:55:24 +10001294 if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
NeilBrownbf07bb72012-05-22 13:55:06 +10001295 j |= PAGE_COUNTER_MASK;
NeilBrownaa3163f2005-06-21 17:17:22 -07001296 continue;
1297 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001298 counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001299 }
NeilBrown32a76272005-06-21 17:17:14 -07001300
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001301 bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
NeilBrownbf07bb72012-05-22 13:55:06 +10001302 if (!bmc) {
1303 j |= PAGE_COUNTER_MASK;
1304 continue;
1305 }
1306 if (*bmc == 1 && !bitmap->need_sync) {
1307 /* We can clear the bit */
NeilBrownbf07bb72012-05-22 13:55:06 +10001308 *bmc = 0;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001309 md_bitmap_count_page(counts, block, -1);
1310 md_bitmap_file_clear_bit(bitmap, block);
NeilBrownbf07bb72012-05-22 13:55:06 +10001311 } else if (*bmc && *bmc <= 2) {
1312 *bmc = 1;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001313 md_bitmap_set_pending(counts, block);
NeilBrown2585f3e2011-09-21 15:37:46 +10001314 bitmap->allclean = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001315 }
NeilBrown32a76272005-06-21 17:17:14 -07001316 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001317 spin_unlock_irq(&counts->lock);
NeilBrown32a76272005-06-21 17:17:14 -07001318
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001319 md_bitmap_wait_writes(bitmap);
NeilBrownbf07bb72012-05-22 13:55:06 +10001320 /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
1321 * DIRTY pages need to be written by bitmap_unplug so it can wait
1322 * for them.
1323 * If we find any DIRTY page we stop there and let bitmap_unplug
1324 * handle all the rest. This is important in the case where
 1325 * the first block holds the superblock and it has been updated.
1326 * We mustn't write any other blocks before the superblock.
1327 */
NeilBrown62f82fa2012-05-22 13:55:21 +10001328 for (j = 0;
1329 j < bitmap->storage.file_pages
1330 && !test_bit(BITMAP_STALE, &bitmap->flags);
1331 j++) {
NeilBrownd1891222012-05-22 13:55:09 +10001332 if (test_page_attr(bitmap, j,
NeilBrownbf07bb72012-05-22 13:55:06 +10001333 BITMAP_PAGE_DIRTY))
1334 /* bitmap_unplug will handle the rest */
1335 break;
Zhiqiang Liu55180492019-12-07 11:00:08 +08001336 if (bitmap->storage.filemap &&
1337 test_and_clear_page_attr(bitmap, j,
NeilBrownbdfd1142012-05-22 13:55:22 +10001338 BITMAP_PAGE_NEEDWRITE)) {
NeilBrown1ec885c2012-05-22 13:55:10 +10001339 write_page(bitmap, bitmap->storage.filemap[j], 0);
NeilBrownbf07bb72012-05-22 13:55:06 +10001340 }
1341 }
NeilBrownbf07bb72012-05-22 13:55:06 +10001342
NeilBrown7be3dfe2008-03-10 11:43:48 -07001343 done:
NeilBrown8311c292008-03-04 14:29:30 -08001344 if (bitmap->allclean == 0)
NeilBrown2e61ebb2011-12-23 10:17:50 +11001345 mddev->thread->timeout =
1346 mddev->bitmap_info.daemon_sleep;
NeilBrownc3d97142009-12-14 12:49:52 +11001347 mutex_unlock(&mddev->bitmap_info.mutex);
NeilBrown32a76272005-06-21 17:17:14 -07001348}
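/*
 * Counter decay in the loop above, for one chunk that saw a write and
 * then went idle: the low bits of its counter sit at 2 once the last
 * write completes; the first daemon pass drops it to 1 and marks the
 * counter page pending, the next pass drops it to 0 and clears the
 * on-disk bit via md_bitmap_file_clear_bit().  A chunk therefore has
 * to stay idle for a couple of daemon_sleep periods before its dirty
 * bit even starts heading back to storage.
 */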
1349
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001350static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
1351 sector_t offset, sector_t *blocks,
1352 int create)
NeilBrownee305ac2009-09-23 18:06:44 +10001353__releases(bitmap->lock)
1354__acquires(bitmap->lock)
NeilBrown32a76272005-06-21 17:17:14 -07001355{
1356 /* If 'create', we might release the lock and reclaim it.
1357 * The lock must have been taken with interrupts enabled.
1358 * If !create, we don't release the lock.
1359 */
NeilBrown61a0d802012-03-19 12:46:41 +11001360 sector_t chunk = offset >> bitmap->chunkshift;
NeilBrown32a76272005-06-21 17:17:14 -07001361 unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
1362 unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
1363 sector_t csize;
NeilBrownef425672010-06-01 19:37:33 +10001364 int err;
NeilBrown32a76272005-06-21 17:17:14 -07001365
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001366 err = md_bitmap_checkpage(bitmap, page, create, 0);
NeilBrownef425672010-06-01 19:37:33 +10001367
1368 if (bitmap->bp[page].hijacked ||
1369 bitmap->bp[page].map == NULL)
NeilBrown61a0d802012-03-19 12:46:41 +11001370 csize = ((sector_t)1) << (bitmap->chunkshift +
Zhao Hemingd837f722020-10-06 00:00:24 +08001371 PAGE_COUNTER_SHIFT);
NeilBrownef425672010-06-01 19:37:33 +10001372 else
NeilBrown61a0d802012-03-19 12:46:41 +11001373 csize = ((sector_t)1) << bitmap->chunkshift;
NeilBrownef425672010-06-01 19:37:33 +10001374 *blocks = csize - (offset & (csize - 1));
1375
1376 if (err < 0)
NeilBrown32a76272005-06-21 17:17:14 -07001377 return NULL;
NeilBrownef425672010-06-01 19:37:33 +10001378
NeilBrown32a76272005-06-21 17:17:14 -07001379 /* now locked ... */
1380
1381 if (bitmap->bp[page].hijacked) { /* hijacked pointer */
1382 /* should we use the first or second counter field
1383 * of the hijacked pointer? */
1384 int hi = (pageoff > PAGE_COUNTER_MASK);
NeilBrown32a76272005-06-21 17:17:14 -07001385 return &((bitmap_counter_t *)
1386 &bitmap->bp[page].map)[hi];
NeilBrownef425672010-06-01 19:37:33 +10001387 } else /* page is allocated */
NeilBrown32a76272005-06-21 17:17:14 -07001388 return (bitmap_counter_t *)
1389 &(bitmap->bp[page].map[pageoff]);
NeilBrown32a76272005-06-21 17:17:14 -07001390}
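/*
 * Layout notes for the lookup above: with 4KiB pages and 16-bit
 * bitmap_counter_t values, an allocated bp[] page holds
 * PAGE_COUNTER_RATIO (2048) counters, one per chunk.  If the counter
 * page could not be allocated it is "hijacked": the storage of the map
 * pointer itself is reused and only its first two 16-bit slots act as
 * counters, selected by whether the chunk falls in the lower or upper
 * half of the page's range.  That is also why *blocks is reported as
 * the full (chunkshift + PAGE_COUNTER_SHIFT) range when the page is
 * hijacked or not yet allocated.  The 4KiB figure is just the common
 * configuration, not something the code relies on.
 */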
1391
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001392int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
NeilBrown32a76272005-06-21 17:17:14 -07001393{
NeilBrownac2f40b2010-06-01 19:37:31 +10001394 if (!bitmap)
1395 return 0;
NeilBrown4b6d2872005-09-09 16:23:47 -07001396
1397 if (behind) {
Paul Clements696fcd52010-03-08 16:02:37 +11001398 int bw;
NeilBrown4b6d2872005-09-09 16:23:47 -07001399 atomic_inc(&bitmap->behind_writes);
Paul Clements696fcd52010-03-08 16:02:37 +11001400 bw = atomic_read(&bitmap->behind_writes);
1401 if (bw > bitmap->behind_writes_used)
1402 bitmap->behind_writes_used = bw;
1403
NeilBrown36a4e1f2011-10-07 14:23:17 +11001404 pr_debug("inc write-behind count %d/%lu\n",
1405 bw, bitmap->mddev->bitmap_info.max_write_behind);
NeilBrown4b6d2872005-09-09 16:23:47 -07001406 }
1407
NeilBrown32a76272005-06-21 17:17:14 -07001408 while (sectors) {
NeilBrown57dab0b2010-10-19 10:03:39 +11001409 sector_t blocks;
NeilBrown32a76272005-06-21 17:17:14 -07001410 bitmap_counter_t *bmc;
1411
NeilBrown40cffcc2012-05-22 13:55:24 +10001412 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001413 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
NeilBrown32a76272005-06-21 17:17:14 -07001414 if (!bmc) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001415 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001416 return 0;
1417 }
1418
Namhyung Kim27d5ea02011-06-09 11:42:57 +10001419 if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
Neil Brownda6e1a32007-02-08 14:20:37 -08001420 DEFINE_WAIT(__wait);
1421 /* note that it is safe to do the prepare_to_wait
1422 * after the test as long as we do it before dropping
1423 * the spinlock.
1424 */
1425 prepare_to_wait(&bitmap->overflow_wait, &__wait,
1426 TASK_UNINTERRUPTIBLE);
NeilBrown40cffcc2012-05-22 13:55:24 +10001427 spin_unlock_irq(&bitmap->counts.lock);
NeilBrownf54a9d02012-08-02 08:33:20 +10001428 schedule();
Neil Brownda6e1a32007-02-08 14:20:37 -08001429 finish_wait(&bitmap->overflow_wait, &__wait);
1430 continue;
1431 }
1432
NeilBrownac2f40b2010-06-01 19:37:31 +10001433 switch (*bmc) {
NeilBrown32a76272005-06-21 17:17:14 -07001434 case 0:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001435 md_bitmap_file_set_bit(bitmap, offset);
1436 md_bitmap_count_page(&bitmap->counts, offset, 1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -05001437 fallthrough;
NeilBrown32a76272005-06-21 17:17:14 -07001438 case 1:
1439 *bmc = 2;
1440 }
Neil Brownda6e1a32007-02-08 14:20:37 -08001441
NeilBrown32a76272005-06-21 17:17:14 -07001442 (*bmc)++;
1443
NeilBrown40cffcc2012-05-22 13:55:24 +10001444 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001445
1446 offset += blocks;
1447 if (sectors > blocks)
1448 sectors -= blocks;
NeilBrownac2f40b2010-06-01 19:37:31 +10001449 else
1450 sectors = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001451 }
1452 return 0;
1453}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001454EXPORT_SYMBOL(md_bitmap_startwrite);
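/*
 * Behaviour sketch for md_bitmap_startwrite(): every chunk touched by
 * the request gets its counter raised, and a chunk whose counter was 0
 * has its on-disk bit set via md_bitmap_file_set_bit() first.  If the
 * count portion of a counter is already at COUNTER_MAX the writer
 * sleeps on overflow_wait until md_bitmap_endwrite() drops it.  For
 * write-behind requests, behind_writes only accounts for what is in
 * flight; the max_write_behind limit itself is enforced by the caller
 * (raid1 in practice).
 */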
NeilBrown32a76272005-06-21 17:17:14 -07001455
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001456void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
1457 unsigned long sectors, int success, int behind)
NeilBrown32a76272005-06-21 17:17:14 -07001458{
NeilBrownac2f40b2010-06-01 19:37:31 +10001459 if (!bitmap)
1460 return;
NeilBrown4b6d2872005-09-09 16:23:47 -07001461 if (behind) {
NeilBrowne5551902010-03-31 11:21:44 +11001462 if (atomic_dec_and_test(&bitmap->behind_writes))
1463 wake_up(&bitmap->behind_wait);
NeilBrown36a4e1f2011-10-07 14:23:17 +11001464 pr_debug("dec write-behind count %d/%lu\n",
1465 atomic_read(&bitmap->behind_writes),
1466 bitmap->mddev->bitmap_info.max_write_behind);
NeilBrown4b6d2872005-09-09 16:23:47 -07001467 }
1468
NeilBrown32a76272005-06-21 17:17:14 -07001469 while (sectors) {
NeilBrown57dab0b2010-10-19 10:03:39 +11001470 sector_t blocks;
NeilBrown32a76272005-06-21 17:17:14 -07001471 unsigned long flags;
1472 bitmap_counter_t *bmc;
1473
NeilBrown40cffcc2012-05-22 13:55:24 +10001474 spin_lock_irqsave(&bitmap->counts.lock, flags);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001475 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
NeilBrown32a76272005-06-21 17:17:14 -07001476 if (!bmc) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001477 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
NeilBrown32a76272005-06-21 17:17:14 -07001478 return;
1479 }
1480
NeilBrown961902c2011-12-23 09:57:48 +11001481 if (success && !bitmap->mddev->degraded &&
Neil Browna0da84f2008-06-28 08:31:22 +10001482 bitmap->events_cleared < bitmap->mddev->events) {
1483 bitmap->events_cleared = bitmap->mddev->events;
1484 bitmap->need_sync = 1;
NeilBrown5ff5aff2010-06-01 19:37:32 +10001485 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
Neil Browna0da84f2008-06-28 08:31:22 +10001486 }
1487
Namhyung Kim27d5ea02011-06-09 11:42:57 +10001488 if (!success && !NEEDED(*bmc))
NeilBrown32a76272005-06-21 17:17:14 -07001489 *bmc |= NEEDED_MASK;
1490
Namhyung Kim27d5ea02011-06-09 11:42:57 +10001491 if (COUNTER(*bmc) == COUNTER_MAX)
Neil Brownda6e1a32007-02-08 14:20:37 -08001492 wake_up(&bitmap->overflow_wait);
1493
NeilBrown32a76272005-06-21 17:17:14 -07001494 (*bmc)--;
NeilBrown2585f3e2011-09-21 15:37:46 +10001495 if (*bmc <= 2) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001496 md_bitmap_set_pending(&bitmap->counts, offset);
NeilBrown2585f3e2011-09-21 15:37:46 +10001497 bitmap->allclean = 0;
1498 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001499 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
NeilBrown32a76272005-06-21 17:17:14 -07001500 offset += blocks;
1501 if (sectors > blocks)
1502 sectors -= blocks;
NeilBrownac2f40b2010-06-01 19:37:31 +10001503 else
1504 sectors = 0;
NeilBrown32a76272005-06-21 17:17:14 -07001505 }
1506}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001507EXPORT_SYMBOL(md_bitmap_endwrite);
NeilBrown32a76272005-06-21 17:17:14 -07001508
NeilBrown57dab0b2010-10-19 10:03:39 +11001509static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
NeilBrown1187cf02009-03-31 14:27:02 +11001510 int degraded)
NeilBrown32a76272005-06-21 17:17:14 -07001511{
1512 bitmap_counter_t *bmc;
1513 int rv;
1514 if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
1515 *blocks = 1024;
1516 return 1; /* always resync if no bitmap */
1517 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001518 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001519 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
NeilBrown32a76272005-06-21 17:17:14 -07001520 rv = 0;
1521 if (bmc) {
1522 /* locked */
1523 if (RESYNC(*bmc))
1524 rv = 1;
1525 else if (NEEDED(*bmc)) {
1526 rv = 1;
NeilBrown6a806c52005-07-15 03:56:35 -07001527 if (!degraded) { /* don't set/clear bits if degraded */
1528 *bmc |= RESYNC_MASK;
1529 *bmc &= ~NEEDED_MASK;
1530 }
NeilBrown32a76272005-06-21 17:17:14 -07001531 }
1532 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001533 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001534 return rv;
1535}
1536
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001537int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
1538 int degraded)
NeilBrown1187cf02009-03-31 14:27:02 +11001539{
1540 /* bitmap_start_sync must always report on multiples of whole
1541 * pages, otherwise resync (which is very PAGE_SIZE based) will
1542 * get confused.
1543 * So call __bitmap_start_sync repeatedly (if needed) until
 1544 * at least PAGE_SIZE>>9 blocks are covered.
1545 * Return the 'or' of the result.
1546 */
1547 int rv = 0;
NeilBrown57dab0b2010-10-19 10:03:39 +11001548 sector_t blocks1;
NeilBrown1187cf02009-03-31 14:27:02 +11001549
1550 *blocks = 0;
1551 while (*blocks < (PAGE_SIZE>>9)) {
1552 rv |= __bitmap_start_sync(bitmap, offset,
1553 &blocks1, degraded);
1554 offset += blocks1;
1555 *blocks += blocks1;
1556 }
1557 return rv;
1558}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001559EXPORT_SYMBOL(md_bitmap_start_sync);
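/*
 * Example of the whole-page rule above: with 4KiB pages PAGE_SIZE >> 9
 * is 8, so md_bitmap_start_sync() keeps calling __bitmap_start_sync()
 * until at least 8 sectors' worth of blocks have been reported,
 * OR-ing the individual results.  With typical chunk sizes a single
 * call already covers far more than that, so the loop usually runs
 * exactly once.
 */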
NeilBrown1187cf02009-03-31 14:27:02 +11001560
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001561void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
NeilBrown32a76272005-06-21 17:17:14 -07001562{
1563 bitmap_counter_t *bmc;
1564 unsigned long flags;
NeilBrownac2f40b2010-06-01 19:37:31 +10001565
1566 if (bitmap == NULL) {
NeilBrown32a76272005-06-21 17:17:14 -07001567 *blocks = 1024;
1568 return;
1569 }
NeilBrown40cffcc2012-05-22 13:55:24 +10001570 spin_lock_irqsave(&bitmap->counts.lock, flags);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001571 bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
NeilBrown32a76272005-06-21 17:17:14 -07001572 if (bmc == NULL)
1573 goto unlock;
1574 /* locked */
NeilBrown32a76272005-06-21 17:17:14 -07001575 if (RESYNC(*bmc)) {
1576 *bmc &= ~RESYNC_MASK;
1577
1578 if (!NEEDED(*bmc) && aborted)
1579 *bmc |= NEEDED_MASK;
1580 else {
NeilBrown2585f3e2011-09-21 15:37:46 +10001581 if (*bmc <= 2) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001582 md_bitmap_set_pending(&bitmap->counts, offset);
NeilBrown2585f3e2011-09-21 15:37:46 +10001583 bitmap->allclean = 0;
1584 }
NeilBrown32a76272005-06-21 17:17:14 -07001585 }
1586 }
1587 unlock:
NeilBrown40cffcc2012-05-22 13:55:24 +10001588 spin_unlock_irqrestore(&bitmap->counts.lock, flags);
NeilBrown32a76272005-06-21 17:17:14 -07001589}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001590EXPORT_SYMBOL(md_bitmap_end_sync);
NeilBrown32a76272005-06-21 17:17:14 -07001591
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001592void md_bitmap_close_sync(struct bitmap *bitmap)
NeilBrown32a76272005-06-21 17:17:14 -07001593{
1594 /* Sync has finished, and any bitmap chunks that weren't synced
 1595 * properly have been aborted. It remains for us to clear the
 1596 * RESYNC bit wherever it is still set.
1597 */
1598 sector_t sector = 0;
NeilBrown57dab0b2010-10-19 10:03:39 +11001599 sector_t blocks;
NeilBrownb47490c2008-02-06 01:39:50 -08001600 if (!bitmap)
1601 return;
NeilBrown32a76272005-06-21 17:17:14 -07001602 while (sector < bitmap->mddev->resync_max_sectors) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001603 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
NeilBrownb47490c2008-02-06 01:39:50 -08001604 sector += blocks;
NeilBrown32a76272005-06-21 17:17:14 -07001605 }
1606}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001607EXPORT_SYMBOL(md_bitmap_close_sync);
NeilBrown32a76272005-06-21 17:17:14 -07001608
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001609void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
NeilBrownb47490c2008-02-06 01:39:50 -08001610{
1611 sector_t s = 0;
NeilBrown57dab0b2010-10-19 10:03:39 +11001612 sector_t blocks;
NeilBrownb47490c2008-02-06 01:39:50 -08001613
1614 if (!bitmap)
1615 return;
1616 if (sector == 0) {
1617 bitmap->last_end_sync = jiffies;
1618 return;
1619 }
Goldwyn Rodriguesc40f3412015-08-19 08:14:42 +10001620 if (!force && time_before(jiffies, (bitmap->last_end_sync
NeilBrown1b04be92009-12-14 12:49:53 +11001621 + bitmap->mddev->bitmap_info.daemon_sleep)))
NeilBrownb47490c2008-02-06 01:39:50 -08001622 return;
1623 wait_event(bitmap->mddev->recovery_wait,
1624 atomic_read(&bitmap->mddev->recovery_active) == 0);
1625
NeilBrown75d3da42011-01-14 09:14:34 +11001626 bitmap->mddev->curr_resync_completed = sector;
Shaohua Li29530792016-12-08 15:48:19 -08001627 set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
NeilBrown40cffcc2012-05-22 13:55:24 +10001628 sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
NeilBrownb47490c2008-02-06 01:39:50 -08001629 s = 0;
1630 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001631 md_bitmap_end_sync(bitmap, s, &blocks, 0);
NeilBrownb47490c2008-02-06 01:39:50 -08001632 s += blocks;
1633 }
1634 bitmap->last_end_sync = jiffies;
Junxiao Bie1a86db2020-07-14 16:10:26 -07001635 sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed);
NeilBrownb47490c2008-02-06 01:39:50 -08001636}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001637EXPORT_SYMBOL(md_bitmap_cond_end_sync);
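/*
 * Note on the rounding above: curr_resync_completed records the exact
 * sector, but RESYNC bits are only cleared up to the last whole chunk
 * boundary (sector &= ~((1ULL << chunkshift) - 1)), so a partially
 * resynced chunk keeps its RESYNC state until a later call or
 * md_bitmap_close_sync() finishes it.
 */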
NeilBrownb47490c2008-02-06 01:39:50 -08001638
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001639void md_bitmap_sync_with_cluster(struct mddev *mddev,
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001640 sector_t old_lo, sector_t old_hi,
1641 sector_t new_lo, sector_t new_hi)
1642{
1643 struct bitmap *bitmap = mddev->bitmap;
1644 sector_t sector, blocks = 0;
1645
1646 for (sector = old_lo; sector < new_lo; ) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001647 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001648 sector += blocks;
1649 }
1650 WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
1651
1652 for (sector = old_hi; sector < new_hi; ) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001653 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001654 sector += blocks;
1655 }
1656 WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
1657}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001658EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
Guoqing Jiang18c9ff72016-05-02 11:50:12 -04001659
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001660static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
NeilBrown32a76272005-06-21 17:17:14 -07001661{
1662 /* For each chunk covered by any of these sectors, set the
NeilBrownef99bf42012-05-22 13:55:08 +10001663 * counter to 2 and possibly set resync_needed. They should all
NeilBrown32a76272005-06-21 17:17:14 -07001664 * be 0 at this point
1665 */
NeilBrown193f1c92005-08-04 12:53:33 -07001666
NeilBrown57dab0b2010-10-19 10:03:39 +11001667 sector_t secs;
NeilBrown193f1c92005-08-04 12:53:33 -07001668 bitmap_counter_t *bmc;
NeilBrown40cffcc2012-05-22 13:55:24 +10001669 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001670 bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
NeilBrown193f1c92005-08-04 12:53:33 -07001671 if (!bmc) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001672 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown193f1c92005-08-04 12:53:33 -07001673 return;
NeilBrown32a76272005-06-21 17:17:14 -07001674 }
NeilBrownac2f40b2010-06-01 19:37:31 +10001675 if (!*bmc) {
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001676 *bmc = 2;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001677 md_bitmap_count_page(&bitmap->counts, offset, 1);
1678 md_bitmap_set_pending(&bitmap->counts, offset);
NeilBrown2585f3e2011-09-21 15:37:46 +10001679 bitmap->allclean = 0;
NeilBrown193f1c92005-08-04 12:53:33 -07001680 }
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001681 if (needed)
1682 *bmc |= NEEDED_MASK;
NeilBrown40cffcc2012-05-22 13:55:24 +10001683 spin_unlock_irq(&bitmap->counts.lock);
NeilBrown32a76272005-06-21 17:17:14 -07001684}
1685
Paul Clements9b1d1da2006-10-03 01:15:49 -07001686/* dirty the memory and file bits for bitmap chunks "s" to "e" */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001687void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
Paul Clements9b1d1da2006-10-03 01:15:49 -07001688{
1689 unsigned long chunk;
1690
1691 for (chunk = s; chunk <= e; chunk++) {
NeilBrown40cffcc2012-05-22 13:55:24 +10001692 sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001693 md_bitmap_set_memory_bits(bitmap, sec, 1);
1694 md_bitmap_file_set_bit(bitmap, sec);
NeilBrownffa23322009-12-14 12:49:56 +11001695 if (sec < bitmap->mddev->recovery_cp)
1696 /* We are asserting that the array is dirty,
1697 * so move the recovery_cp address back so
1698 * that it is obvious that it is dirty
1699 */
1700 bitmap->mddev->recovery_cp = sec;
Paul Clements9b1d1da2006-10-03 01:15:49 -07001701 }
1702}
1703
NeilBrown32a76272005-06-21 17:17:14 -07001704/*
NeilBrown6b8b3e82005-08-04 12:53:35 -07001705 * flush out any pending updates
1706 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001707void md_bitmap_flush(struct mddev *mddev)
NeilBrown6b8b3e82005-08-04 12:53:35 -07001708{
1709 struct bitmap *bitmap = mddev->bitmap;
NeilBrown42a04b52009-12-14 12:49:53 +11001710 long sleep;
NeilBrown6b8b3e82005-08-04 12:53:35 -07001711
1712 if (!bitmap) /* there was no bitmap */
1713 return;
1714
1715 /* run the daemon_work three time to ensure everything is flushed
1716 * that can be
1717 */
NeilBrown1b04be92009-12-14 12:49:53 +11001718 sleep = mddev->bitmap_info.daemon_sleep * 2;
NeilBrown42a04b52009-12-14 12:49:53 +11001719 bitmap->daemon_lastrun -= sleep;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001720 md_bitmap_daemon_work(mddev);
NeilBrown42a04b52009-12-14 12:49:53 +11001721 bitmap->daemon_lastrun -= sleep;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001722 md_bitmap_daemon_work(mddev);
NeilBrown42a04b52009-12-14 12:49:53 +11001723 bitmap->daemon_lastrun -= sleep;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001724 md_bitmap_daemon_work(mddev);
Sudhakar Panneerselvam404a8ef2021-04-13 04:08:29 +00001725 if (mddev->bitmap_info.external)
1726 md_super_wait(mddev);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001727 md_bitmap_update_sb(bitmap);
NeilBrown6b8b3e82005-08-04 12:53:35 -07001728}
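/*
 * Why three passes are enough: each backdated md_bitmap_daemon_work()
 * call decays the counters one step (2 -> 1 -> 0), the pass that
 * reaches 0 only marks the affected file page PENDING, and that page
 * is promoted to NEEDWRITE and written on the pass after it.  Three
 * back-to-back runs therefore flush everything that can be flushed
 * before md_bitmap_update_sb() writes the superblock.
 */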
1729
1730/*
NeilBrown32a76272005-06-21 17:17:14 -07001731 * free memory that was allocated
1732 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001733void md_bitmap_free(struct bitmap *bitmap)
NeilBrown32a76272005-06-21 17:17:14 -07001734{
1735 unsigned long k, pages;
1736 struct bitmap_page *bp;
NeilBrown32a76272005-06-21 17:17:14 -07001737
1738 if (!bitmap) /* there was no bitmap */
1739 return;
1740
Guoqing Jiangf9a67b12016-04-01 17:08:49 +08001741 if (bitmap->sysfs_can_clear)
1742 sysfs_put(bitmap->sysfs_can_clear);
1743
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001744 if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
1745 bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
Goldwyn Rodriguesb97e92572014-06-06 11:50:56 -05001746 md_cluster_stop(bitmap->mddev);
1747
NeilBrownfae7d322012-05-22 13:55:21 +10001748 /* Shouldn't be needed - but just in case.... */
1749 wait_event(bitmap->write_wait,
1750 atomic_read(&bitmap->pending_writes) == 0);
1751
1752 /* release the bitmap file */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001753 md_bitmap_file_unmap(&bitmap->storage);
NeilBrown32a76272005-06-21 17:17:14 -07001754
NeilBrown40cffcc2012-05-22 13:55:24 +10001755 bp = bitmap->counts.bp;
1756 pages = bitmap->counts.pages;
NeilBrown32a76272005-06-21 17:17:14 -07001757
1758 /* free all allocated memory */
1759
NeilBrown32a76272005-06-21 17:17:14 -07001760 if (bp) /* deallocate the page memory */
1761 for (k = 0; k < pages; k++)
1762 if (bp[k].map && !bp[k].hijacked)
1763 kfree(bp[k].map);
1764 kfree(bp);
1765 kfree(bitmap);
1766}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001767EXPORT_SYMBOL(md_bitmap_free);
NeilBrownaa5cbd12009-12-14 12:49:46 +11001768
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001769void md_bitmap_wait_behind_writes(struct mddev *mddev)
Guoqing Jiang48df4982017-03-14 09:40:20 +08001770{
1771 struct bitmap *bitmap = mddev->bitmap;
1772
1773 /* wait for behind writes to complete */
1774 if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
1775 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
1776 mdname(mddev));
1777 /* need to kick something here to make sure I/O goes? */
1778 wait_event(bitmap->behind_wait,
1779 atomic_read(&bitmap->behind_writes) == 0);
1780 }
1781}
1782
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001783void md_bitmap_destroy(struct mddev *mddev)
NeilBrown3178b0d2005-09-09 16:23:50 -07001784{
1785 struct bitmap *bitmap = mddev->bitmap;
1786
1787 if (!bitmap) /* there was no bitmap */
1788 return;
1789
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001790 md_bitmap_wait_behind_writes(mddev);
Guoqing Jiang69b00b52019-12-23 10:49:00 +01001791 if (!mddev->serialize_policy)
1792 mddev_destroy_serial_pool(mddev, NULL, true);
Guoqing Jiang48df4982017-03-14 09:40:20 +08001793
NeilBrownc3d97142009-12-14 12:49:52 +11001794 mutex_lock(&mddev->bitmap_info.mutex);
NeilBrown978a7a42014-12-15 12:56:58 +11001795 spin_lock(&mddev->lock);
NeilBrown3178b0d2005-09-09 16:23:50 -07001796 mddev->bitmap = NULL; /* disconnect from the md device */
NeilBrown978a7a42014-12-15 12:56:58 +11001797 spin_unlock(&mddev->lock);
NeilBrownc3d97142009-12-14 12:49:52 +11001798 mutex_unlock(&mddev->bitmap_info.mutex);
NeilBrownb15c2e52006-01-06 00:20:16 -08001799 if (mddev->thread)
1800 mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
NeilBrown3178b0d2005-09-09 16:23:50 -07001801
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001802 md_bitmap_free(bitmap);
NeilBrown3178b0d2005-09-09 16:23:50 -07001803}
NeilBrown32a76272005-06-21 17:17:14 -07001804
1805/*
1806 * initialize the bitmap structure
1807 * if this returns an error, bitmap_destroy must be called to do clean up
Guoqing Jiangf9a67b12016-04-01 17:08:49 +08001808 * once mddev->bitmap is set
NeilBrown32a76272005-06-21 17:17:14 -07001809 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001810struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
NeilBrown32a76272005-06-21 17:17:14 -07001811{
1812 struct bitmap *bitmap;
NeilBrown1f593902009-04-20 11:50:24 +10001813 sector_t blocks = mddev->resync_max_sectors;
NeilBrownc3d97142009-12-14 12:49:52 +11001814 struct file *file = mddev->bitmap_info.file;
NeilBrown32a76272005-06-21 17:17:14 -07001815 int err;
Tejun Heo324a56e2013-12-11 14:11:53 -05001816 struct kernfs_node *bm = NULL;
NeilBrown32a76272005-06-21 17:17:14 -07001817
Alexey Dobriyan5f6e3c832006-10-11 01:22:26 -07001818 BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
NeilBrown32a76272005-06-21 17:17:14 -07001819
NeilBrownc3d97142009-12-14 12:49:52 +11001820 BUG_ON(file && mddev->bitmap_info.offset);
NeilBrowna654b9d82005-06-21 17:17:27 -07001821
NeilBrown230b55f2017-10-17 14:24:09 +11001822 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
1823 pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
1824 mdname(mddev));
1825 return ERR_PTR(-EBUSY);
1826 }
1827
NeilBrown9ffae0c2006-01-06 00:20:32 -08001828 bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
NeilBrown32a76272005-06-21 17:17:14 -07001829 if (!bitmap)
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001830 return ERR_PTR(-ENOMEM);
NeilBrown32a76272005-06-21 17:17:14 -07001831
NeilBrown40cffcc2012-05-22 13:55:24 +10001832 spin_lock_init(&bitmap->counts.lock);
NeilBrownce25c312006-06-26 00:27:49 -07001833 atomic_set(&bitmap->pending_writes, 0);
1834 init_waitqueue_head(&bitmap->write_wait);
Neil Brownda6e1a32007-02-08 14:20:37 -08001835 init_waitqueue_head(&bitmap->overflow_wait);
NeilBrowne5551902010-03-31 11:21:44 +11001836 init_waitqueue_head(&bitmap->behind_wait);
NeilBrownce25c312006-06-26 00:27:49 -07001837
NeilBrown32a76272005-06-21 17:17:14 -07001838 bitmap->mddev = mddev;
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001839 bitmap->cluster_slot = slot;
NeilBrown32a76272005-06-21 17:17:14 -07001840
NeilBrown5ff5aff2010-06-01 19:37:32 +10001841 if (mddev->kobj.sd)
Tejun Heo388975c2013-09-11 23:19:13 -04001842 bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
NeilBrownece5cff2009-12-14 12:49:56 +11001843 if (bm) {
Tejun Heo388975c2013-09-11 23:19:13 -04001844 bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
NeilBrownece5cff2009-12-14 12:49:56 +11001845 sysfs_put(bm);
1846 } else
1847 bitmap->sysfs_can_clear = NULL;
1848
NeilBrown1ec885c2012-05-22 13:55:10 +10001849 bitmap->storage.file = file;
NeilBrownce25c312006-06-26 00:27:49 -07001850 if (file) {
1851 get_file(file);
NeilBrownae8fa282009-10-16 15:56:01 +11001852 /* As future accesses to this file will use bmap,
1853 * and bypass the page cache, we must sync the file
1854 * first.
1855 */
Christoph Hellwig8018ab02010-03-22 17:32:25 +01001856 vfs_fsync(file, 1);
NeilBrownce25c312006-06-26 00:27:49 -07001857 }
NeilBrown42a04b52009-12-14 12:49:53 +11001858 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
Jonathan Brassow9c810752011-06-08 17:59:30 -05001859 if (!mddev->bitmap_info.external) {
1860 /*
1861 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1862 * instructing us to create a new on-disk bitmap instance.
1863 */
1864 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001865 err = md_bitmap_new_disk_sb(bitmap);
Jonathan Brassow9c810752011-06-08 17:59:30 -05001866 else
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001867 err = md_bitmap_read_sb(bitmap);
Jonathan Brassow9c810752011-06-08 17:59:30 -05001868 } else {
NeilBrownece5cff2009-12-14 12:49:56 +11001869 err = 0;
1870 if (mddev->bitmap_info.chunksize == 0 ||
1871 mddev->bitmap_info.daemon_sleep == 0)
1872 /* chunksize and time_base need to be
1873 * set first. */
1874 err = -EINVAL;
1875 }
NeilBrown32a76272005-06-21 17:17:14 -07001876 if (err)
NeilBrown3178b0d2005-09-09 16:23:50 -07001877 goto error;
NeilBrown32a76272005-06-21 17:17:14 -07001878
NeilBrown624ce4f2009-12-14 12:49:56 +11001879 bitmap->daemon_lastrun = jiffies;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001880 err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
NeilBrownd60b4792012-05-22 13:55:25 +10001881 if (err)
NeilBrown3178b0d2005-09-09 16:23:50 -07001882 goto error;
NeilBrown32a76272005-06-21 17:17:14 -07001883
NeilBrownec0cc222016-11-02 14:16:49 +11001884 pr_debug("created bitmap (%lu pages) for device %s\n",
1885 bitmap->counts.pages, bmname(bitmap));
NeilBrown69e51b42010-06-01 19:37:35 +10001886
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001887 err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
1888 if (err)
1889 goto error;
NeilBrown69e51b42010-06-01 19:37:35 +10001890
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001891 return bitmap;
NeilBrown69e51b42010-06-01 19:37:35 +10001892 error:
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001893 md_bitmap_free(bitmap);
Goldwyn Rodriguesf9209a32014-06-06 12:43:49 -05001894 return ERR_PTR(err);
NeilBrown69e51b42010-06-01 19:37:35 +10001895}
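/*
 * Sketch of the usual caller sequence (error handling abbreviated;
 * see md.c for the exact flow):
 *
 *	struct bitmap *bitmap = md_bitmap_create(mddev, -1);
 *	if (IS_ERR(bitmap))
 *		return PTR_ERR(bitmap);
 *	mddev->bitmap = bitmap;
 *	if (md_bitmap_load(mddev))
 *		md_bitmap_destroy(mddev);
 *
 * A slot of -1 means a local (non-clustered) bitmap; the cluster code
 * passes a node slot so each node reads its own on-disk region.
 */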
1896
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001897int md_bitmap_load(struct mddev *mddev)
NeilBrown69e51b42010-06-01 19:37:35 +10001898{
1899 int err = 0;
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001900 sector_t start = 0;
NeilBrown69e51b42010-06-01 19:37:35 +10001901 sector_t sector = 0;
1902 struct bitmap *bitmap = mddev->bitmap;
Guoqing Jiang617b1942019-06-14 17:10:38 +08001903 struct md_rdev *rdev;
NeilBrown69e51b42010-06-01 19:37:35 +10001904
1905 if (!bitmap)
1906 goto out;
1907
Guoqing Jiang617b1942019-06-14 17:10:38 +08001908 rdev_for_each(rdev, mddev)
Guoqing Jiang404659c2019-12-23 10:48:53 +01001909 mddev_create_serial_pool(mddev, rdev, true);
Guoqing Jiang617b1942019-06-14 17:10:38 +08001910
Guoqing Jiang51e453a2016-05-04 02:17:09 -04001911 if (mddev_is_clustered(mddev))
1912 md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
1913
NeilBrown69e51b42010-06-01 19:37:35 +10001914 /* Clear out old bitmap info first: Either there is none, or we
1915 * are resuming after someone else has possibly changed things,
1916 * so we should forget old cached info.
1917 * All chunks should be clean, but some might need_sync.
1918 */
1919 while (sector < mddev->resync_max_sectors) {
NeilBrown57dab0b2010-10-19 10:03:39 +11001920 sector_t blocks;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001921 md_bitmap_start_sync(bitmap, sector, &blocks, 0);
NeilBrown69e51b42010-06-01 19:37:35 +10001922 sector += blocks;
1923 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001924 md_bitmap_close_sync(bitmap);
NeilBrown69e51b42010-06-01 19:37:35 +10001925
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001926 if (mddev->degraded == 0
1927 || bitmap->events_cleared == mddev->events)
1928 /* no need to keep dirty bits to optimise a
1929 * re-add of a missing device */
1930 start = mddev->recovery_cp;
NeilBrown69e51b42010-06-01 19:37:35 +10001931
NeilBrownafbaa902012-04-12 16:05:06 +10001932 mutex_lock(&mddev->bitmap_info.mutex);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001933 err = md_bitmap_init_from_disk(bitmap, start);
NeilBrownafbaa902012-04-12 16:05:06 +10001934 mutex_unlock(&mddev->bitmap_info.mutex);
Jonathan Brassow3520fa42011-07-27 11:00:37 +10001935
NeilBrown32a76272005-06-21 17:17:14 -07001936 if (err)
NeilBrown69e51b42010-06-01 19:37:35 +10001937 goto out;
NeilBrownb405fe92012-05-22 13:55:15 +10001938 clear_bit(BITMAP_STALE, &bitmap->flags);
NeilBrownef99bf42012-05-22 13:55:08 +10001939
1940 /* Kick recovery in case any bits were set */
1941 set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
NeilBrown3178b0d2005-09-09 16:23:50 -07001942
NeilBrown1b04be92009-12-14 12:49:53 +11001943 mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
NeilBrown9cd30fd2009-12-14 12:49:54 +11001944 md_wakeup_thread(mddev->thread);
NeilBrownb15c2e52006-01-06 00:20:16 -08001945
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001946 md_bitmap_update_sb(bitmap);
NeilBrown4ad13662007-07-17 04:06:13 -07001947
NeilBrownb405fe92012-05-22 13:55:15 +10001948 if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
NeilBrown69e51b42010-06-01 19:37:35 +10001949 err = -EIO;
1950out:
NeilBrown3178b0d2005-09-09 16:23:50 -07001951 return err;
NeilBrown32a76272005-06-21 17:17:14 -07001952}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001953EXPORT_SYMBOL_GPL(md_bitmap_load);
NeilBrown32a76272005-06-21 17:17:14 -07001954
Zhao Heming1383b342020-09-27 13:40:13 +08001955/* caller needs to free the returned bitmap with md_bitmap_free() */
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001956struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
1957{
1958 int rv = 0;
1959 struct bitmap *bitmap;
1960
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001961 bitmap = md_bitmap_create(mddev, slot);
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001962 if (IS_ERR(bitmap)) {
1963 rv = PTR_ERR(bitmap);
1964 return ERR_PTR(rv);
1965 }
1966
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001967 rv = md_bitmap_init_from_disk(bitmap, 0);
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001968 if (rv) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001969 md_bitmap_free(bitmap);
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001970 return ERR_PTR(rv);
1971 }
1972
1973 return bitmap;
1974}
1975EXPORT_SYMBOL(get_bitmap_from_slot);
1976
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001977/* Loads the bitmap associated with slot and copies the resync information
1978 * to our bitmap
1979 */
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001980int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05001981 sector_t *low, sector_t *high, bool clear_bits)
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001982{
1983 int rv = 0, i, j;
1984 sector_t block, lo = 0, hi = 0;
1985 struct bitmap_counts *counts;
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001986 struct bitmap *bitmap;
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001987
Guoqing Jiangb98938d2017-03-01 16:42:39 +08001988 bitmap = get_bitmap_from_slot(mddev, slot);
1989 if (IS_ERR(bitmap)) {
1990 pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
1991 return -1;
1992 }
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001993
1994 counts = &bitmap->counts;
1995 for (j = 0; j < counts->chunks; j++) {
1996 block = (sector_t)j << counts->chunkshift;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07001997 if (md_bitmap_file_test_bit(bitmap, block)) {
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05001998 if (!lo)
1999 lo = block;
2000 hi = block;
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002001 md_bitmap_file_clear_bit(bitmap, block);
2002 md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
2003 md_bitmap_file_set_bit(mddev->bitmap, block);
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002004 }
2005 }
2006
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05002007 if (clear_bits) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002008 md_bitmap_update_sb(bitmap);
Guoqing Jiangc84400c2016-05-02 11:50:15 -04002009 /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
2010 * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05002011 for (i = 0; i < bitmap->storage.file_pages; i++)
Guoqing Jiangc84400c2016-05-02 11:50:15 -04002012 if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
2013 set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002014 md_bitmap_unplug(bitmap);
Goldwyn Rodrigues97f6cd32015-04-14 10:45:42 -05002015 }
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002016 md_bitmap_unplug(mddev->bitmap);
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002017 *low = lo;
2018 *high = hi;
Zhao Heming1383b342020-09-27 13:40:13 +08002019 md_bitmap_free(bitmap);
Guoqing Jiangb98938d2017-03-01 16:42:39 +08002020
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002021 return rv;
2022}
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002023EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
Goldwyn Rodrigues11dd35d2014-06-07 00:36:26 -05002024
2025
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002026void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
NeilBrown57148962012-03-19 12:46:40 +11002027{
2028 unsigned long chunk_kb;
NeilBrown40cffcc2012-05-22 13:55:24 +10002029 struct bitmap_counts *counts;
NeilBrown57148962012-03-19 12:46:40 +11002030
2031 if (!bitmap)
2032 return;
2033
NeilBrown40cffcc2012-05-22 13:55:24 +10002034 counts = &bitmap->counts;
2035
NeilBrown57148962012-03-19 12:46:40 +11002036 chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
2037 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
2038 "%lu%s chunk",
NeilBrown40cffcc2012-05-22 13:55:24 +10002039 counts->pages - counts->missing_pages,
2040 counts->pages,
2041 (counts->pages - counts->missing_pages)
NeilBrown57148962012-03-19 12:46:40 +11002042 << (PAGE_SHIFT - 10),
2043 chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
2044 chunk_kb ? "KB" : "B");
NeilBrown1ec885c2012-05-22 13:55:10 +10002045 if (bitmap->storage.file) {
NeilBrown57148962012-03-19 12:46:40 +11002046 seq_printf(seq, ", file: ");
Miklos Szeredi2726d562015-06-19 10:30:28 +02002047 seq_file_path(seq, bitmap->storage.file, " \t\n");
NeilBrown57148962012-03-19 12:46:40 +11002048 }
2049
2050 seq_printf(seq, "\n");
NeilBrown57148962012-03-19 12:46:40 +11002051}
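/*
 * The line built above is what shows up under the array in
 * /proc/mdstat, e.g. (values illustrative):
 *
 *	bitmap: 3/117 pages [12KB], 65536KB chunk, file: /bitmaps/md0.bm
 *
 * i.e. 3 of 117 counter pages currently allocated (12KB of counter
 * memory), 64MiB chunks, backed by an external bitmap file; the
 * ", file:" part is omitted for an internal bitmap.
 */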
2052
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002053int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
NeilBrownd60b4792012-05-22 13:55:25 +10002054 int chunksize, int init)
2055{
2056 /* If chunk_size is 0, choose an appropriate chunk size.
2057 * Then possibly allocate new storage space.
2058 * Then quiesce, copy bits, replace bitmap, and re-start
2059 *
2060 * This function is called both to set up the initial bitmap
2061 * and to resize the bitmap while the array is active.
2062 * If this happens as a result of the array being resized,
2063 * chunksize will be zero, and we need to choose a suitable
2064 * chunksize, otherwise we use what we are given.
2065 */
2066 struct bitmap_storage store;
2067 struct bitmap_counts old_counts;
2068 unsigned long chunks;
2069 sector_t block;
2070 sector_t old_blocks, new_blocks;
2071 int chunkshift;
2072 int ret = 0;
2073 long pages;
2074 struct bitmap_page *new_bp;
2075
NeilBrowne8a27f82017-08-31 10:23:25 +10002076 if (bitmap->storage.file && !init) {
2077 pr_info("md: cannot resize file-based bitmap\n");
2078 return -EINVAL;
2079 }
2080
NeilBrownd60b4792012-05-22 13:55:25 +10002081 if (chunksize == 0) {
2082 /* If there is enough space, leave the chunk size unchanged,
 2083 * else increase by a factor of two until there is enough space.
2084 */
2085 long bytes;
2086 long space = bitmap->mddev->bitmap_info.space;
2087
2088 if (space == 0) {
2089 /* We don't know how much space there is, so limit
2090 * to current size - in sectors.
2091 */
2092 bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
2093 if (!bitmap->mddev->bitmap_info.external)
2094 bytes += sizeof(bitmap_super_t);
2095 space = DIV_ROUND_UP(bytes, 512);
2096 bitmap->mddev->bitmap_info.space = space;
2097 }
2098 chunkshift = bitmap->counts.chunkshift;
2099 chunkshift--;
2100 do {
2101 /* 'chunkshift' is shift from block size to chunk size */
2102 chunkshift++;
2103 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2104 bytes = DIV_ROUND_UP(chunks, 8);
2105 if (!bitmap->mddev->bitmap_info.external)
2106 bytes += sizeof(bitmap_super_t);
2107 } while (bytes > (space << 9));
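 /*
 * Worked example of the sizing loop (illustrative numbers): for a
 * 1TiB array (blocks == 2^31 sectors), space == 8 sectors (4096 bytes)
 * and an internal 256-byte superblock, chunkshift 16 would need
 * 32768 bits + sb = 4352 bytes, which does not fit, while chunkshift
 * 17 needs 16384 bits + sb = 2304 bytes, which does; the chunk size
 * then becomes 1 << (17 + BITMAP_BLOCK_SHIFT) = 64MiB.
 */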
2108 } else
2109 chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;
2110
2111 chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
2112 memset(&store, 0, sizeof(store));
2113 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002114 ret = md_bitmap_storage_alloc(&store, chunks,
2115 !bitmap->mddev->bitmap_info.external,
2116 mddev_is_clustered(bitmap->mddev)
2117 ? bitmap->cluster_slot : 0);
Guoqing Jiangcbb38732016-10-31 10:19:00 +08002118 if (ret) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002119 md_bitmap_file_unmap(&store);
NeilBrownd60b4792012-05-22 13:55:25 +10002120 goto err;
Guoqing Jiangcbb38732016-10-31 10:19:00 +08002121 }
NeilBrownd60b4792012-05-22 13:55:25 +10002122
2123 pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO);
2124
Kees Cook6396bb22018-06-12 14:03:40 -07002125 new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL);
NeilBrownd60b4792012-05-22 13:55:25 +10002126 ret = -ENOMEM;
2127 if (!new_bp) {
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002128 md_bitmap_file_unmap(&store);
NeilBrownd60b4792012-05-22 13:55:25 +10002129 goto err;
2130 }
2131
2132 if (!init)
2133 bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
2134
2135 store.file = bitmap->storage.file;
2136 bitmap->storage.file = NULL;
2137
2138 if (store.sb_page && bitmap->storage.sb_page)
2139 memcpy(page_address(store.sb_page),
2140 page_address(bitmap->storage.sb_page),
Shaohua Li938b5332017-10-16 19:03:44 -07002141 sizeof(bitmap_super_t));
Guoqing Jiangfadcbd22019-09-26 13:53:50 +02002142 spin_lock_irq(&bitmap->counts.lock);
Andy Shevchenkoe64e40182018-08-01 15:20:50 -07002143 md_bitmap_file_unmap(&bitmap->storage);
NeilBrownd60b4792012-05-22 13:55:25 +10002144 bitmap->storage = store;
2145
2146 old_counts = bitmap->counts;
2147 bitmap->counts.bp = new_bp;
2148 bitmap->counts.pages = pages;
2149 bitmap->counts.missing_pages = pages;
2150 bitmap->counts.chunkshift = chunkshift;
2151 bitmap->counts.chunks = chunks;
2152 bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
2153 BITMAP_BLOCK_SHIFT);
2154
2155 blocks = min(old_counts.chunks << old_counts.chunkshift,
2156 chunks << chunkshift);
2157
	/* For cluster raid, need to pre-allocate bitmap */
	if (mddev_is_clustered(bitmap->mddev)) {
		unsigned long page;
		for (page = 0; page < pages; page++) {
			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
			if (ret) {
				unsigned long k;

				/* deallocate the page memory */
				for (k = 0; k < page; k++) {
					kfree(new_bp[k].map);
				}
				kfree(new_bp);

				/* restore some fields from old_counts */
				bitmap->counts.bp = old_counts.bp;
				bitmap->counts.pages = old_counts.pages;
				bitmap->counts.missing_pages = old_counts.pages;
				bitmap->counts.chunkshift = old_counts.chunkshift;
				bitmap->counts.chunks = old_counts.chunks;
				bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
									     BITMAP_BLOCK_SHIFT);
				blocks = old_counts.chunks << old_counts.chunkshift;
				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
				break;
			} else
				bitmap->counts.bp[page].count += 1;
		}
	}

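	/*
	 * Migrate the old counters: any region whose counter still has
	 * NEEDED set is re-marked in the new counter space, and the
	 * corresponding on-disk chunk bits are set so that resync state
	 * survives the resize.
	 */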
	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;
				start <<= chunkshift;
				while (start < end) {
					md_bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				md_bitmap_count_page(&bitmap->counts, block, 1);
				md_bitmap_set_pending(&bitmap->counts, block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}

	if (bitmap->counts.bp != old_counts.bp) {
		unsigned long k;
		for (k = 0; k < old_counts.pages; k++)
			if (!old_counts.bp[k].hijacked)
				kfree(old_counts.bp[k].map);
		kfree(old_counts.bp);
	}

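	/*
	 * Chunks beyond the end of the old bitmap are new space and must be
	 * resynced; mark them NEEDED and flag every file page dirty so the
	 * on-disk bitmap gets rewritten.
	 */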
	if (!init) {
		int i;
		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;
			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (bmc) {
				/* new space.  It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					md_bitmap_count_page(&bitmap->counts, block, 1);
					md_bitmap_set_pending(&bitmap->counts, block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		md_bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(md_bitmap_resize);

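/* 'bitmap/location' reports where the write-intent bitmap is stored: "none",
 * "file", or a signed offset (in sectors) relative to the array metadata.
 * Writing a new value destroys or creates the bitmap; this is refused while
 * a resync or recovery is in progress.
 */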
static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	if (mddev->pers) {
		if (!mddev->pers->quiesce) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto out;
		}
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured.  Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->pers) {
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			rv = -EINVAL;
			goto out;
		} else {
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				goto out;
			if (offset == 0) {
				rv = -EINVAL;
				goto out;
			}
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset) {
				rv = -EINVAL;
				goto out;
			}
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				bitmap = md_bitmap_create(mddev, -1);
				mddev_suspend(mddev);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = md_bitmap_load(mddev);
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
				if (rv) {
					md_bitmap_destroy(mddev);
					mddev_resume(mddev);
					goto out;
				}
				mddev_resume(mddev);
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	if (rv)
		return rv;
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);

/* 'bitmap/space' is the space available at 'location' for the
 * bitmap. This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);

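/* 'bitmap/time_base' is the delay, in seconds, before the bitmap daemon
 * re-examines and flushes bitmap state (mddev->bitmap_info.daemon_sleep).
 * Fractional values are accepted and rounded to jiffies.
 */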
static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store);

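/* 'bitmap/backlog' is the limit on outstanding write-behind requests
 * (mddev->bitmap_info.max_write_behind).  Setting it to zero releases the
 * serial_info_pool unless serialize_policy still needs it; setting a
 * non-zero value creates the pool for each rdev as required.
 */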
static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	if (!backlog && mddev->serial_info_pool) {
		/* serial_info_pool is not needed if backlog is zero */
		if (!mddev->serialize_policy)
			mddev_destroy_serial_pool(mddev, NULL, false);
	} else if (backlog && !mddev->serial_info_pool) {
		/* serial_info_pool is needed since backlog is not zero */
		struct md_rdev *rdev;

		rdev_for_each(rdev, mddev)
			mddev_create_serial_pool(mddev, rdev, false);
	}
	if (old_mwb != backlog)
		md_bitmap_update_sb(mddev->bitmap);
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);

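/* 'bitmap/chunksize' is the number of bytes of the array that each bitmap
 * bit covers.  It can only be set while no bitmap is active, and must be a
 * power of two of at least 512.
 */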
static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;
	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);

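/* 'bitmap/metadata' reports whether the bitmap superblock is managed by md
 * ("internal"), by user-space ("external"), or by the cluster module
 * ("clustered").  It can only be changed while no bitmap is configured.
 */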
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
			(strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);

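/* 'bitmap/can_clear' shows whether bits may be cleared once the corresponding
 * blocks are in sync.  Writing "false" forces bits to stay set (need_sync);
 * writing "true" re-enables clearing and is refused while the array is
 * degraded.
 */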
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;
	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);

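/* 'bitmap/max_backlog_used' reports bitmap->behind_writes_used, the largest
 * write-behind backlog seen so far; writing any value resets it to zero.
 */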
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;
	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);

static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
const struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};