/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include <linux/dynamic_debug.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"


enum al_transaction_types {
	AL_TR_UPDATE = 0,
	AL_TR_INITIALIZED = 0xffff
};
/* all fields on disc in big endian */
struct __packed al_transaction_on_disk {
	/* don't we all like magic */
	__be32	magic;

	/* to identify the most recent transaction block
	 * in the on disk ring buffer */
	__be32	tr_number;

	/* checksum on the full 4k block, with this field set to 0. */
	__be32	crc32c;

	/* type of transaction, special transaction types like:
	 * purge-all, set-all-idle, set-all-active, ... to-be-defined
	 * see also enum al_transaction_types */
	__be16	transaction_type;

	/* we currently allow only a few thousand extents,
	 * so 16bit will be enough for the slot number. */

	/* how many updates in this transaction */
	__be16	n_updates;

	/* maximum slot number, "al-extents" in drbd.conf speak.
	 * Having this in each transaction should make reconfiguration
	 * of that parameter easier. */
	__be16	context_size;

	/* slot number the context starts with */
	__be16	context_start_slot_nr;

	/* Some reserved bytes.  Expected usage is a 64bit counter of
	 * sectors-written since device creation, and other data generation tag
	 * supporting usage */
	__be32	__reserved[4];

	/* --- 36 bytes used --- */

	/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
	 * in one transaction, then use the remaining bytes in the 4k block for
	 * context information.  A "flexible" number of updates per transaction
	 * does not help, as we have to account for the case when all update
	 * slots are used anyways, so it would only complicate code without
	 * additional benefit.
	 */
	__be16	update_slot_nr[AL_UPDATES_PER_TRANSACTION];

	/* but the extent number is 32bit, which at an extent size of 4 MiB
	 * allows covering device sizes of up to 2**54 bytes (16 PiB) */
	__be32	update_extent_nr[AL_UPDATES_PER_TRANSACTION];

	/* --- 420 bytes used (36 + 64*6) --- */

	/* 4096 - 420 = 3676 = 919 * 4 */
	__be32	context[AL_CONTEXT_PER_TRANSACTION];
};
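
/*
 * Illustrative sketch only, not part of the original file: the size comments
 * above imply AL_UPDATES_PER_TRANSACTION == 64 and AL_CONTEXT_PER_TRANSACTION
 * == 919, so the fixed header is 36 bytes, the update slots add 64 * (2 + 4)
 * = 384 bytes, and the context fills the remaining 919 * 4 = 3676 bytes for
 * exactly 4096.  Assuming BUILD_BUG_ON() is visible through the headers
 * already included, the 4k layout could be asserted at build time like this
 * (hypothetical helper name, never called):
 */
static inline void al_transaction_layout_sanity_check(void)
{
	/* one on-disk transaction must be exactly one aligned 4k block */
	BUILD_BUG_ON(sizeof(struct al_transaction_on_disk) != 4096);
}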

struct update_odbm_work {
	struct drbd_work w;
	unsigned int enr;
};

struct update_al_work {
	struct drbd_work w;
	struct completion event;
	int err;
};

static int al_write_transaction(struct drbd_conf *mdev);

void *drbd_md_get_buffer(struct drbd_conf *mdev)
{
	int r;

	wait_event(mdev->misc_wait,
		   (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
		   mdev->state.disk <= D_FAILED);

	return r ? NULL : page_address(mdev->md_io_page);
}

void drbd_md_put_buffer(struct drbd_conf *mdev)
{
	if (atomic_dec_and_test(&mdev->md_io_in_use))
		wake_up(&mdev->misc_wait);
}
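
/*
 * Usage sketch (illustrative, not from the original file): the md_io page is
 * a single shared buffer, so callers pair drbd_md_get_buffer() with
 * drbd_md_put_buffer() and must tolerate a NULL return once the disk has
 * failed, roughly as _al_write_transaction() below does:
 *
 *	struct al_transaction_on_disk *b = drbd_md_get_buffer(mdev);
 *	if (!b)
 *		return -ENODEV;	// disk <= D_FAILED, no buffer available
 *	... fill b, write it out synchronously ...
 *	drbd_md_put_buffer(mdev);
 */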

void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
				       unsigned int *done)
{
	long dt;

	rcu_read_lock();
	dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
	rcu_read_unlock();
	dt = dt * HZ / 10;
	if (dt == 0)
		dt = MAX_SCHEDULE_TIMEOUT;

	dt = wait_event_timeout(mdev->misc_wait,
			*done || test_bit(FORCE_DETACH, &mdev->flags), dt);
	if (dt == 0) {
		dev_err(DEV, "meta-data IO operation timed out\n");
		drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
	}
}

static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
				 struct drbd_backing_dev *bdev,
				 struct page *page, sector_t sector,
				 int rw, int size)
{
	struct bio *bio;
	int err;

	mdev->md_io.done = 0;
	mdev->md_io.error = -ENODEV;

	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
		rw |= REQ_FUA | REQ_FLUSH;
	rw |= REQ_SYNC;

	bio = bio_alloc_drbd(GFP_NOIO);
	bio->bi_bdev = bdev->md_bdev;
	bio->bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, page, size, 0) != size)
		goto out;
	bio->bi_private = &mdev->md_io;
	bio->bi_end_io = drbd_md_io_complete;
	bio->bi_rw = rw;

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* Corresponding put_ldev in drbd_md_io_complete() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
		err = -ENODEV;
		goto out;
	}

	bio_get(bio); /* one bio_put() is in the completion handler */
	atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_endio(bio, -EIO);
	else
		submit_bio(rw, bio);
	wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
	if (bio_flagged(bio, BIO_UPTODATE))
		err = mdev->md_io.error;

 out:
	bio_put(bio);
	return err;
}

int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
			 sector_t sector, int rw)
{
	int err;
	struct page *iop = mdev->md_io_page;

	D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);

	BUG_ON(!bdev->md_bdev);

	dev_dbg(DEV, "meta_data io: %s [%d]:%s(,%llus,%s)\n",
	     current->comm, current->pid, __func__,
	     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	if (sector < drbd_md_first_sector(bdev) ||
	    sector + 7 > drbd_md_last_sector(bdev))
		dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");

	/* we do all our meta data IO in aligned 4k blocks. */
	err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096);
	if (err) {
		dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
		    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", err);
	}
	return err;
}

static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *al_ext;
	struct lc_element *tmp;
	int wake;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
			wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
			spin_unlock_irq(&mdev->al_lock);
			if (wake)
				wake_up(&mdev->al_wait);
			return NULL;
		}
	}
	al_ext = lc_get(mdev->act_log, enr);
	spin_unlock_irq(&mdev->al_lock);
	return al_ext;
}

void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	bool locked = false;


	D_ASSERT(first <= last);
	D_ASSERT(atomic_read(&mdev->local_cnt) > 0);

	for (enr = first; enr <= last; enr++)
		wait_event(mdev->al_wait, _al_get(mdev, enr) != NULL);

	/* Serialize multiple transactions.
	 * This uses test_and_set_bit, memory barrier is implicit.
	 */
	wait_event(mdev->al_wait,
			mdev->act_log->pending_changes == 0 ||
			(locked = lc_try_lock_for_transaction(mdev->act_log)));

	if (locked) {
		/* drbd_al_write_transaction(mdev,al_ext,enr);
		 * recurses into generic_make_request(), which
		 * disallows recursion, bios being serialized on the
		 * current->bio_tail list now.
		 * we have to delegate updates to the activity log
		 * to the worker thread. */

		/* Double check: it may have been committed by someone else,
		 * while we have been waiting for the lock. */
		if (mdev->act_log->pending_changes) {
			bool write_al_updates;

			rcu_read_lock();
			write_al_updates = rcu_dereference(mdev->ldev->disk_conf)->al_updates;
			rcu_read_unlock();

			if (write_al_updates) {
				al_write_transaction(mdev);
				mdev->al_writ_cnt++;
			}

			spin_lock_irq(&mdev->al_lock);
			/* FIXME
			if (err)
				we need an "lc_cancel" here;
			*/
			lc_committed(mdev->act_log);
			spin_unlock_irq(&mdev->al_lock);
		}
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);
	}
}
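
/*
 * Sketch of the extent arithmetic above (illustrative; assumes the usual
 * AL_EXTENT_SHIFT of 22, i.e. 4 MiB activity log extents): a 4 KiB write
 * starting 2 KiB before the 4 MiB boundary covers sectors 8188..8195, so
 * first = 8188 >> 13 = 0 and last = 8195 >> 13 = 1 -- the request straddles
 * an extent boundary and both extents 0 and 1 must be active before the
 * write may proceed.  drbd_al_complete_io() below releases the same
 * [first, last] range once the request has completed.
 */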

void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	struct lc_element *extent;
	unsigned long flags;

	D_ASSERT(first <= last);
	spin_lock_irqsave(&mdev->al_lock, flags);

	for (enr = first; enr <= last; enr++) {
		extent = lc_find(mdev->act_log, enr);
		if (!extent) {
			dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
			continue;
		}
		lc_put(mdev->act_log, extent);
	}
	spin_unlock_irqrestore(&mdev->al_lock, flags);
	wake_up(&mdev->al_wait);
}

#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
	return al_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* al extent number to bit */
		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}

static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
{
	return rs_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* resync extent number to bit */
		 (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
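
/*
 * Worked example for the two helpers above (illustrative; assumes the usual
 * constants PAGE_SHIFT == 12, BM_BLOCK_SHIFT == 12 (4 KiB per bitmap bit),
 * AL_EXTENT_SHIFT == 22 (4 MiB AL extents) and BM_EXT_SHIFT == 24 (16 MiB
 * resync extents)): a bitmap page holds 4096 * 8 = 2^15 bits; an AL extent
 * covers 2^(22-12) = 1024 bits, so 32 AL extents share one bitmap page and
 * al_extent_to_bm_page() shifts by 15 - 10 = 5; a resync extent covers
 * 2^(24-12) = 4096 bits, so 8 resync extents share one page and
 * rs_extent_to_bm_page() shifts by 15 - 12 = 3.
 */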

static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
{
	const unsigned int stripes = 1;
	const unsigned int stripe_size_4kB = MD_32kB_SECT/MD_4kB_SECT;

	/* transaction number, modulo on-disk ring buffer wrap around */
	unsigned int t = mdev->al_tr_number % (stripe_size_4kB * stripes);

	/* ... to aligned 4k on disk block */
	t = ((t % stripes) * stripe_size_4kB) + t/stripes;

	/* ... to 512 byte sector in activity log */
	t *= 8;

	/* ... plus offset to the on disk position */
	return mdev->ldev->md.md_offset + mdev->ldev->md.al_offset + t;
}
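
/*
 * Illustrative walk-through of the mapping above (assuming MD_32kB_SECT /
 * MD_4kB_SECT == 8, i.e. eight 4k transaction slots of on-disk activity
 * log): with stripes == 1 the transaction number simply cycles through
 * slots 0..7, each slot being 8 sectors wide, so e.g. al_tr_number == 10
 * maps to slot 10 % 8 == 2 and to sector offset 2 * 8 == 16 within the
 * on-disk AL area.
 */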

static int
_al_write_transaction(struct drbd_conf *mdev)
{
	struct al_transaction_on_disk *buffer;
	struct lc_element *e;
	sector_t sector;
	int i, mx;
	unsigned extent_nr;
	unsigned crc = 0;
	int err = 0;

	if (!get_ldev(mdev)) {
		dev_err(DEV, "disk is %s, cannot start al transaction\n",
			drbd_disk_str(mdev->state.disk));
		return -EIO;
	}

	/* The bitmap write may have failed, causing a state change. */
	if (mdev->state.disk < D_INCONSISTENT) {
		dev_err(DEV,
			"disk is %s, cannot write al transaction\n",
			drbd_disk_str(mdev->state.disk));
		put_ldev(mdev);
		return -EIO;
	}

	buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
	if (!buffer) {
		dev_err(DEV, "disk failed while waiting for md_io buffer\n");
		put_ldev(mdev);
		return -ENODEV;
	}

	memset(buffer, 0, sizeof(*buffer));
	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
	buffer->tr_number = cpu_to_be32(mdev->al_tr_number);

	i = 0;

	/* Even though no one can start to change this list
	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
	 * lc_try_lock_for_transaction() --, someone may still
	 * be in the process of changing it. */
	spin_lock_irq(&mdev->al_lock);
	list_for_each_entry(e, &mdev->act_log->to_be_changed, list) {
		if (i == AL_UPDATES_PER_TRANSACTION) {
			i++;
			break;
		}
		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
		if (e->lc_number != LC_FREE)
			drbd_bm_mark_for_writeout(mdev,
					al_extent_to_bm_page(e->lc_number));
		i++;
	}
	spin_unlock_irq(&mdev->al_lock);
	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

	buffer->n_updates = cpu_to_be16(i);
	for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
		buffer->update_slot_nr[i] = cpu_to_be16(-1);
		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
	}

	buffer->context_size = cpu_to_be16(mdev->act_log->nr_elements);
	buffer->context_start_slot_nr = cpu_to_be16(mdev->al_tr_cycle);

	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
		   mdev->act_log->nr_elements - mdev->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = mdev->al_tr_cycle + i;
		extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
		buffer->context[i] = cpu_to_be32(extent_nr);
	}
	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
		buffer->context[i] = cpu_to_be32(LC_FREE);

	mdev->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
	if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
		mdev->al_tr_cycle = 0;

	sector = al_tr_number_to_on_disk_sector(mdev);

	crc = crc32c(0, buffer, 4096);
	buffer->crc32c = cpu_to_be32(crc);

	/* normal execution path goes through all three branches */
	if (drbd_bm_write_hinted(mdev))
		err = -EIO;
		/* drbd_chk_io_error done already */
	else if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		err = -EIO;
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
	} else {
		mdev->al_tr_number++;
	}

	drbd_md_put_buffer(mdev);
	put_ldev(mdev);

	return err;
}


static int w_al_write_transaction(struct drbd_work *w, int unused)
{
	struct update_al_work *aw = container_of(w, struct update_al_work, w);
	struct drbd_conf *mdev = w->mdev;
	int err;

	err = _al_write_transaction(mdev);
	aw->err = err;
	complete(&aw->event);

	return err != -EIO ? err : 0;
}

/* Calls from worker context (see w_restart_disk_io()) need to write the
   transaction directly. Others came through generic_make_request(),
   those need to delegate it to the worker. */
static int al_write_transaction(struct drbd_conf *mdev)
{
	struct update_al_work al_work;

	if (current == mdev->tconn->worker.task)
		return _al_write_transaction(mdev);

	init_completion(&al_work.event);
	al_work.w.cb = w_al_write_transaction;
	al_work.w.mdev = mdev;
	drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
	wait_for_completion(&al_work.event);

	return al_work.err;
}

static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(mdev->act_log, al_ext);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @mdev:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry dropped to 0 first, of course.
 *
 * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_conf *mdev)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));

	for (i = 0; i < mdev->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(mdev->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
	}

	wake_up(&mdev->al_wait);
}

static int w_update_odbm(struct drbd_work *w, int unused)
{
	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
	struct drbd_conf *mdev = w->mdev;
	struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
		kfree(udw);
		return 0;
	}

	drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
	put_ldev(mdev);

	kfree(udw);

	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
		switch (mdev->state.conn) {
		case C_SYNC_SOURCE:  case C_SYNC_TARGET:
		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
			drbd_resync_finished(mdev);
		default:
			/* nothing to do */
			break;
		}
	}
	drbd_bcast_event(mdev, &sib);

	return 0;
}


/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
				      int count, int success)
{
	struct lc_element *e;
	struct update_odbm_work *udw;

	unsigned int enr;

	D_ASSERT(atomic_read(&mdev->local_cnt));

	/* I simply assume that a sector/size pair never crosses
	 * a 16 MB extent border. (Currently this is true...) */
	enr = BM_SECT_TO_EXT(sector);

	e = lc_get(mdev->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (success)
				ext->rs_left -= count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
				    "rs_failed=%d count=%d cstate=%s\n",
				     (unsigned long long)sector,
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count,
				     drbd_conn_str(mdev->state.conn));

				/* We don't expect to be able to clear more bits
				 * than have been set when we originally counted
				 * the set bits to cache that value in ext->rs_left.
				 * Whatever the reason (disconnect during resync,
				 * delayed local completion of an application write),
				 * try to fix it up by recounting here. */
				ext->rs_left = drbd_bm_e_weight(mdev, enr);
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(mdev, enr);
			if (ext->flags != 0) {
				dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				dev_warn(DEV, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = success ? 0 : count;
			/* we don't keep a persistent log of the resync lru,
			 * we can commit any change right away. */
			lc_committed(mdev->resync);
		}
		lc_put(mdev->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left == ext->rs_failed) {
			ext->rs_failed = 0;

			udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
			if (udw) {
				udw->enr = ext->lce.lc_number;
				udw->w.cb = w_update_odbm;
				udw->w.mdev = mdev;
				drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
			} else {
				dev_warn(DEV, "Could not kmalloc a udw\n");
			}
		}
	} else {
		dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    mdev->resync_locked,
		    mdev->resync->nr_elements,
		    mdev->resync->flags);
	}
}

void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
{
	unsigned long now = jiffies;
	unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
	int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
		    mdev->state.conn != C_PAUSED_SYNC_T &&
		    mdev->state.conn != C_PAUSED_SYNC_S) {
			mdev->rs_mark_time[next] = now;
			mdev->rs_mark_left[next] = still_to_go;
			mdev->rs_last_mark = next;
		}
	}
}

/* clear the bit corresponding to the piece of storage in question:
 * size bytes of data starting from sector.  Only clear the bits of the
 * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
		       const char *file, const unsigned int line)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;
	int wake_up = 0;
	unsigned long flags;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}

	if (!get_ldev(mdev))
		return; /* no disk, no metadata, no bitmap to clear bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/* we clear it (in sync).
	 * round up start sector, round down end sector.  we make sure we only
	 * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		goto out;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		goto out;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
	if (count) {
		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
		spin_lock_irqsave(&mdev->al_lock, flags);
		drbd_try_clear_on_disk_bm(mdev, sector, count, true);
		spin_unlock_irqrestore(&mdev->al_lock, flags);

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
out:
	put_ldev(mdev);
	if (wake_up)
		wake_up(&mdev->al_wait);
}
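
/*
 * Rounding sketch for __drbd_set_in_sync() (illustrative; assumes the usual
 * BM_BLOCK_SIZE of 4 KiB, i.e. BM_SECT_PER_BIT == 8): a range covering
 * sectors 9..22 rounds the start up to sbnr = BM_SECT_TO_BIT(9 + 7) = 2 and
 * the end down to ebnr = BM_SECT_TO_BIT(22 - 7) = 1, so sbnr > ebnr and no
 * bit is cleared -- only fully covered, aligned 4 KiB blocks are ever marked
 * in sync.  __drbd_set_out_of_sync() below needs no such rounding: it simply
 * marks every bitmap block the request touches.
 */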

/*
 * this is intended to set one request worth of data out of sync.
 * affects at least 1 bit,
 * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
			    const char *file, const unsigned int line)
{
	unsigned long sbnr, ebnr, flags;
	sector_t esector, nr_sectors;
	unsigned int enr, count = 0;
	struct lc_element *e;

	/* this should be an empty REQ_FLUSH */
	if (size == 0)
		return 0;

	if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "sector: %llus, size: %d\n",
			(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(mdev))
		return 0; /* no disk, no metadata, no bitmap to set bits in */

	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	/* we set it out of sync,
	 * we do not need to round anything here */
	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	/* ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors. */
	spin_lock_irqsave(&mdev->al_lock, flags);
	count = drbd_bm_set_bits(mdev, sbnr, ebnr);

	enr = BM_SECT_TO_EXT(sector);
	e = lc_find(mdev->resync, enr);
	if (e)
		lc_entry(e, struct bm_extent, lce)->rs_left += count;
	spin_unlock_irqrestore(&mdev->al_lock, flags);

out:
	put_ldev(mdev);

	return count;
}

static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_locked > mdev->resync->nr_elements/2) {
		spin_unlock_irq(&mdev->al_lock);
		return NULL;
	}
	e = lc_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_committed(mdev->resync);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			mdev->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = mdev->resync->flags;
	spin_unlock_irq(&mdev->al_lock);
	if (wakeup)
		wake_up(&mdev->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			dev_warn(DEV, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{
	int rv;

	spin_lock_irq(&mdev->al_lock);
	rv = lc_is_used(mdev->act_log, enr);
	spin_unlock_irq(&mdev->al_lock);

	return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;
	int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
			 200 times -> 20 seconds. */

retry:
	sig = wait_event_interruptible(mdev->al_wait,
			(bm_ext = _bme_get(mdev, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(mdev->al_wait,
					       !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
					       test_bit(BME_PRIORITY, &bm_ext->flags));

		if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
			spin_lock_irq(&mdev->al_lock);
			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
				mdev->resync_locked--;
				wake_up(&mdev->al_wait);
			}
			spin_unlock_irq(&mdev->al_lock);
			if (sig)
				return -EINTR;
			if (schedule_timeout_interruptible(HZ/10))
				return -EINTR;
			if (sa && --sa == 0)
				dev_warn(DEV, "drbd_rs_begin_io() stepped aside for 20sec."
					 " Resync stalled?\n");
			goto retry;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}

/**
 * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 *
 * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
 * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
 * if there is still application IO going on in this area.
 */
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);
	if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
		/* in case you have very heavy scattered io, it may
		 * stall the syncer undefined if we give up the ref count
		 * when we try again and requeue.
		 *
		 * if we don't give up the refcount, but the next time
		 * we are scheduled this extent has been "synced" by new
		 * application writes, we'd miss the lc_put on the
		 * extent we keep the refcount on.
		 * so we remembered which extent we had to try again, and
		 * if the next requested one is something else, we do
		 * the lc_put here...
		 * we also have to wake_up
		 */
		e = lc_find(mdev->resync, mdev->resync_wenr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (bm_ext) {
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
			clear_bit(BME_NO_WRITES, &bm_ext->flags);
			mdev->resync_wenr = LC_FREE;
			if (lc_put(mdev->resync, &bm_ext->lce) == 0)
				mdev->resync_locked--;
			wake_up(&mdev->al_wait);
		} else {
			dev_alert(DEV, "LOGIC BUG\n");
		}
	}
	/* TRY. */
	e = lc_try_get(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (test_bit(BME_LOCKED, &bm_ext->flags))
			goto proceed;
		if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
			mdev->resync_locked++;
		} else {
			/* we did set the BME_NO_WRITES,
			 * but then could not set BME_LOCKED,
			 * so we tried again.
			 * drop the extra reference. */
			bm_ext->lce.refcnt--;
			D_ASSERT(bm_ext->lce.refcnt > 0);
		}
		goto check_al;
	} else {
		/* do we rather want to try later? */
		if (mdev->resync_locked > mdev->resync->nr_elements-3)
			goto try_again;
		/* Do or do not. There is no try. -- Yoda */
		e = lc_get(mdev->resync, enr);
		bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
		if (!bm_ext) {
			const unsigned long rs_flags = mdev->resync->flags;
			if (rs_flags & LC_STARVING)
				dev_warn(DEV, "Have to wait for element"
				     " (resync LRU too small?)\n");
			BUG_ON(rs_flags & LC_LOCKED);
			goto try_again;
		}
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
			bm_ext->rs_failed = 0;
			lc_committed(mdev->resync);
			wake_up(&mdev->al_wait);
			D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
		}
		set_bit(BME_NO_WRITES, &bm_ext->flags);
		D_ASSERT(bm_ext->lce.refcnt == 1);
		mdev->resync_locked++;
		goto check_al;
	}
check_al:
	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		if (lc_is_used(mdev->act_log, al_enr+i))
			goto try_again;
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
proceed:
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	return 0;

try_again:
	if (bm_ext)
		mdev->resync_wenr = enr;
	spin_unlock_irq(&mdev->al_lock);
	return -EAGAIN;
}
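
/*
 * Caller-side sketch (illustrative, not from the original file): the resync
 * path is expected to treat -EAGAIN as "application IO is still active in
 * this 16 MiB extent, retry this sector later", roughly:
 *
 *	if (drbd_try_rs_begin_io(mdev, sector) == -EAGAIN)
 *		... requeue the resync request and move on ...
 *	else
 *		... extent is BME_LOCKED, submit the resync IO,
 *		    then call drbd_rs_complete_io(mdev, sector) ...
 */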

void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&mdev->al_lock, flags);
	e = lc_find(mdev->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&mdev->al_lock, flags);
		dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
		    "but refcnt is 0!?\n",
		    (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		mdev->resync_locked--;
		wake_up(&mdev->al_wait);
	}

	spin_unlock_irqrestore(&mdev->al_lock, flags);
}

/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @mdev:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(mdev->resync);
		put_ldev(mdev);
	}
	mdev->resync_locked = 0;
	mdev->resync_wenr = LC_FREE;
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);
}

/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @mdev:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_conf *mdev)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&mdev->al_lock);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < mdev->resync->nr_elements; i++) {
			e = lc_element_by_index(mdev->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == mdev->resync_wenr) {
				dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
				     " got 'synced' by application io\n",
				     mdev->resync_wenr);
				D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				mdev->resync_wenr = LC_FREE;
				lc_put(mdev->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				dev_info(DEV, "Retrying drbd_rs_del_all() later. "
				     "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(mdev);
				spin_unlock_irq(&mdev->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(mdev->resync, &bm_ext->lce);
		}
		D_ASSERT(mdev->resync->used == 0);
		put_ldev(mdev);
	}
	spin_unlock_irq(&mdev->al_lock);
	wake_up(&mdev->al_wait);

	return 0;
}

/**
 * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
 * @mdev:	DRBD device.
 * @sector:	The sector number.
 * @size:	Size of failed IO operation, in bytes.
 */
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count;
	sector_t esector, nr_sectors;
	int wake_up = 0;

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
				(unsigned long long)sector, size);
		return;
	}
	nr_sectors = drbd_get_capacity(mdev->this_bdev);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		return;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	/*
	 * round up start sector, round down end sector. we make sure we only
	 * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
	if (unlikely(esector < BM_SECT_PER_BIT-1))
		return;
	if (unlikely(esector == (nr_sectors-1)))
		ebnr = lbnr;
	else
		ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
	sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);

	if (sbnr > ebnr)
		return;

	/*
	 * ok, (capacity & 7) != 0 sometimes, but who cares...
	 * we count rs_{total,left} in bits, not sectors.
	 */
	spin_lock_irq(&mdev->al_lock);
	count = drbd_bm_count_bits(mdev, sbnr, ebnr);
	if (count) {
		mdev->rs_failed += count;

		if (get_ldev(mdev)) {
			drbd_try_clear_on_disk_bm(mdev, sector, count, false);
			put_ldev(mdev);
		}

		/* just wake_up unconditional now, various lc_changed(),
		 * lc_put() in drbd_try_clear_on_disk_bm(). */
		wake_up = 1;
	}
	spin_unlock_irq(&mdev->al_lock);
	if (wake_up)
		wake_up(&mdev->al_wait);
}