/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
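/* Illustrative usage (added for clarity, not part of the original file):
 * built as a module:      modprobe drbd minor_count=8
 * built into the kernel:  boot with drbd.minor_count=8 on the command line */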

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd*/

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct drbd_conf **minor_table;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
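/* Illustrative sketch (added for clarity, not part of the original file):
 * the pool is threaded through struct page itself, roughly
 *	set_page_private(page, (unsigned long)drbd_pp_pool);  chain in a page
 *	drbd_pp_pool = page;
 *	page = (struct page *)page_private(page);             walk the chain
 * see the drbd_pp_* allocation helpers in drbd_receiver.c for the real code. */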
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 * attached.
 */
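/*
 * Illustration (added for clarity, not part of the original file):
 *
 *   oldest_tle -> epoch -> epoch -> ... -> newest_tle -> NULL
 *        |          |        |                  |
 *     requests   requests  requests          requests
 *
 * Barrier acks from the peer retire epochs at the oldest_tle end, while new
 * write requests are attached to the epoch at the newest_tle end.
 */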
static int tl_init(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	mdev->oldest_tle = b;
	mdev->newest_tle = b;
	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);

	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;

	return 1;
}

static void tl_cleanup(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
	kfree(mdev->oldest_tle);
	mdev->oldest_tle = NULL;
	kfree(mdev->unused_spare_tle);
	mdev->unused_spare_tle = NULL;
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = mdev->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (mdev->newest_tle != new) {
		mdev->newest_tle->next = new;
		mdev->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	b = mdev->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
			barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, barrier_acked);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(connection_lost_while_pending).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, barrier_acked) above.
	   */
	list_del_init(&b->requests);

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, b);
		if (nob)
			mdev->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore mdev->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		mdev->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&mdev->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&mdev->req_lock);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}


/* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach
 * those requests from the newest barrier when changing to another cstate.
 *
 * That headless list vanishes when the last request has finished its write or
 * sent its out_of_sync packet. */
static void tl_forget(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	if (test_bit(CREATE_BARRIER, &mdev->flags))
		return;

	b = mdev->newest_tle;
	list_del(&b->requests);
	_tl_add_barrier(mdev, b);
}

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
 * restart_frozen_disk_io.
 */
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = mdev->oldest_tle;
	pn = &mdev->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == resend) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(mdev);
					set_bit(CREATE_BARRIER, &mdev->flags);
				}

				drbd_queue_work(&mdev->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(mdev);

			if (b == mdev->newest_tle) {
				/* recycle, but reinit! */
				D_ASSERT(tmp == NULL);
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}
Philipp Reisnerb411b362009-09-25 16:07:19 -0700433
434/**
435 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
436 * @mdev: DRBD device.
437 *
438 * This is called after the connection to the peer was lost. The storage covered
439 * by the requests on the transfer gets marked as our of sync. Called from the
440 * receiver thread and the worker thread.
441 */
442void tl_clear(struct drbd_conf *mdev)
443{
Philipp Reisnerb411b362009-09-25 16:07:19 -0700444 struct list_head *le, *tle;
445 struct drbd_request *r;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700446
447 spin_lock_irq(&mdev->req_lock);
448
Philipp Reisner11b58e72010-05-12 17:08:26 +0200449 _tl_restart(mdev, connection_lost_while_pending);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700450
451 /* we expect this list to be empty. */
452 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
453
454 /* but just in case, clean it up anyways! */
455 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
456 r = list_entry(le, struct drbd_request, tl_requests);
457 /* It would be nice to complete outside of spinlock.
458 * But this is easier for now. */
459 _req_mod(r, connection_lost_while_pending);
460 }
461
462 /* ensure bit indicating barrier is required is clear */
463 clear_bit(CREATE_BARRIER, &mdev->flags);
464
Philipp Reisner288f4222010-05-27 15:07:43 +0200465 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
466
Philipp Reisnerb411b362009-09-25 16:07:19 -0700467 spin_unlock_irq(&mdev->req_lock);
468}
469
Philipp Reisner11b58e72010-05-12 17:08:26 +0200470void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
471{
472 spin_lock_irq(&mdev->req_lock);
473 _tl_restart(mdev, what);
Philipp Reisnerb411b362009-09-25 16:07:19 -0700474 spin_unlock_irq(&mdev->req_lock);
475}
476
477/**
Andreas Gruenbacher81e84652010-12-09 15:03:57 +0100478 * cl_wide_st_chg() - true if the state change is a cluster wide one
Philipp Reisnerb411b362009-09-25 16:07:19 -0700479 * @mdev: DRBD device.
480 * @os: old (current) state.
481 * @ns: new (wanted) state.
482 */
483static int cl_wide_st_chg(struct drbd_conf *mdev,
484 union drbd_state os, union drbd_state ns)
485{
486 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
487 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
488 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
489 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
490 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
491 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
492 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
493}
494
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100495enum drbd_state_rv
496drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
497 union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -0700498{
499 unsigned long flags;
500 union drbd_state os, ns;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +0100501 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -0700502
503 spin_lock_irqsave(&mdev->req_lock, flags);
504 os = mdev->state;
505 ns.i = (os.i & ~mask.i) | val.i;
506 rv = _drbd_set_state(mdev, ns, f, NULL);
507 ns = mdev->state;
508 spin_unlock_irqrestore(&mdev->req_lock, flags);
509
510 return rv;
511}
512
513/**
514 * drbd_force_state() - Impose a change which happens outside our control on our state
515 * @mdev: DRBD device.
516 * @mask: mask of state bits to change.
517 * @val: value of new state bits.
518 */
519void drbd_force_state(struct drbd_conf *mdev,
520 union drbd_state mask, union drbd_state val)
521{
522 drbd_change_state(mdev, CS_HARD, mask, val);
523}
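/* Illustrative call (added for clarity, not part of the original file):
 *	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 * NS() builds the mask/val pair so that only the .conn field changes; the new
 * state is then computed as ns.i = (os.i & ~mask.i) | val.i, see above. */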

static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
						    union drbd_state,
						    union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
			union drbd_state, union drbd_state);

static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = 0;
	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (!rv) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_state_transition(mdev, ns, os);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}

/**
 * drbd_req_state() - Perform a possibly cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->req_lock, flags);
		os = mdev->state;
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
		  union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}


/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;
		put_net_conf(mdev);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	return rv;
}

/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
			  union drbd_state os)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn_sync_abort:
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Disallow Network errors to configure a device's network part */
	if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
	    os.conn <= C_DISCONNECTING)
		ns.conn = os.conn;

	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
	 * If you try to go into some Sync* state, that shall fail (elsewhere). */
	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
		ns.conn = os.conn;

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		ns.disk = D_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
		ns.disk = D_DISKLESS;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
		ns.conn = os.conn;

	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* Abort resync if a disk fails/detaches */
	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}

static void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		 enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, os, ns, &warn_sync_abort);

	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks ; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
		} else
			rv = is_valid_state_transition(mdev, ns, os);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	{
	char *pbp, pb[300];
	pbp = pb;
	*pbp = 0;
	if (ns.role != os.role)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));
	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);
	dev_info(DEV, "%s\n", pb);
	}

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;
	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	if (os.conn == C_AHEAD && ns.conn != C_AHEAD)
		tl_forget(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->done = done;
		drbd_queue_work(&mdev->data.work, &ascw->w);
	} else {
		dev_warn(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 1;
}

static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
{
	int rv;

	D_ASSERT(current == mdev->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);
	if (!is_susp(mdev->state))
		D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	drbd_bm_lock(mdev, why);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}

Philipp Reisnerb411b362009-09-25 16:07:19 -07001334/**
1335 * after_state_ch() - Perform after state change actions that may sleep
1336 * @mdev: DRBD device.
1337 * @os: old state.
1338 * @ns: new state.
1339 * @flags: Flags
1340 */
1341static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1342 union drbd_state ns, enum chg_state_flags flags)
1343{
1344 enum drbd_fencing_p fp;
Philipp Reisner67098932010-06-24 16:24:25 +02001345 enum drbd_req_event what = nothing;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001346 union drbd_state nsm = (union drbd_state){ .i = -1 };
Philipp Reisnerb411b362009-09-25 16:07:19 -07001347
1348 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1349 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1350 if (mdev->p_uuid)
1351 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1352 }
1353
1354 fp = FP_DONT_CARE;
1355 if (get_ldev(mdev)) {
1356 fp = mdev->ldev->dc.fencing;
1357 put_ldev(mdev);
1358 }
1359
1360 /* Inform userspace about the change... */
1361 drbd_bcast_state(mdev, ns);
1362
1363 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1364 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1365 drbd_khelper(mdev, "pri-on-incon-degr");
1366
1367 /* Here we have the actions that are performed after a
1368 state change. This function might sleep */
1369
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001370 nsm.i = -1;
1371 if (ns.susp_nod) {
Philipp Reisner3f986882010-12-20 14:48:20 +01001372 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1373 what = resend;
Philipp Reisner265be2d2010-05-31 10:14:17 +02001374
Philipp Reisner67098932010-06-24 16:24:25 +02001375 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
Philipp Reisner3f986882010-12-20 14:48:20 +01001376 what = restart_frozen_disk_io;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001377
Philipp Reisner3f986882010-12-20 14:48:20 +01001378 if (what != nothing)
1379 nsm.susp_nod = 0;
Philipp Reisner265be2d2010-05-31 10:14:17 +02001380 }
1381
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001382 if (ns.susp_fen) {
Philipp Reisner43a51822010-06-11 11:26:34 +02001383 /* case1: The outdate peer handler is successful: */
1384 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001385 tl_clear(mdev);
Philipp Reisner43a51822010-06-11 11:26:34 +02001386 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1387 drbd_uuid_new_current(mdev);
1388 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner43a51822010-06-11 11:26:34 +02001389 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001390 spin_lock_irq(&mdev->req_lock);
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001391 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001392 spin_unlock_irq(&mdev->req_lock);
1393 }
Philipp Reisner43a51822010-06-11 11:26:34 +02001394 /* case2: The connection was established again: */
1395 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1396 clear_bit(NEW_CUR_UUID, &mdev->flags);
Philipp Reisner67098932010-06-24 16:24:25 +02001397 what = resend;
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001398 nsm.susp_fen = 0;
Philipp Reisner43a51822010-06-11 11:26:34 +02001399 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001400 }
Philipp Reisner67098932010-06-24 16:24:25 +02001401
1402 if (what != nothing) {
1403 spin_lock_irq(&mdev->req_lock);
1404 _tl_restart(mdev, what);
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001405 nsm.i &= mdev->state.i;
1406 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
Philipp Reisner67098932010-06-24 16:24:25 +02001407 spin_unlock_irq(&mdev->req_lock);
1408 }
1409
Lars Ellenberg5a22db82010-12-17 21:14:23 +01001410 /* Became sync source. With protocol >= 96, we still need to send out
1411 * the sync uuid now. Need to do that before any drbd_send_state, or
1412 * the other side may go "paused sync" before receiving the sync uuids,
1413 * which is unexpected. */
1414 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1415 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1416 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1417 drbd_gen_and_send_sync_uuid(mdev);
1418 put_ldev(mdev);
1419 }
1420
Philipp Reisnerb411b362009-09-25 16:07:19 -07001421 /* Do not change the order of the if above and the two below... */
1422 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1423 drbd_send_uuids(mdev);
1424 drbd_send_state(mdev);
1425 }
1426 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
1427 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
1428
1429 /* Lost contact to peer's copy of the data */
1430 if ((os.pdsk >= D_INCONSISTENT &&
1431 os.pdsk != D_UNKNOWN &&
1432 os.pdsk != D_OUTDATED)
1433 && (ns.pdsk < D_INCONSISTENT ||
1434 ns.pdsk == D_UNKNOWN ||
1435 ns.pdsk == D_OUTDATED)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001436 if (get_ldev(mdev)) {
1437 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001438 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001439 if (is_susp(mdev->state)) {
Philipp Reisner43a51822010-06-11 11:26:34 +02001440 set_bit(NEW_CUR_UUID, &mdev->flags);
1441 } else {
1442 drbd_uuid_new_current(mdev);
1443 drbd_send_uuids(mdev);
1444 }
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001445 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001446 put_ldev(mdev);
1447 }
1448 }
1449
1450 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
Philipp Reisner18a50fa2010-06-21 14:14:15 +02001451 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
Philipp Reisner2c8d1962010-05-25 14:32:03 +02001452 drbd_uuid_new_current(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02001453 drbd_send_uuids(mdev);
1454 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001455
1456 /* D_DISKLESS Peer becomes secondary */
1457 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001458 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer");
1459 put_ldev(mdev);
1460 }
1461
Lars Ellenberg06d33e92010-12-18 17:00:59 +01001462 /* Write out all changed bits on demote.
1463 * Though, no need to da that just yet
1464 * if there is a resync going on still */
1465 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1466 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
Lars Ellenberg19f843a2010-12-15 08:59:11 +01001467 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001468 put_ldev(mdev);
1469 }
1470
1471 /* Last part of the attaching process ... */
1472 if (ns.conn >= C_CONNECTED &&
1473 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
Philipp Reisnere89b5912010-03-24 17:11:33 +01001474 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001475 drbd_send_uuids(mdev);
1476 drbd_send_state(mdev);
1477 }
1478
1479 /* We want to pause/continue resync, tell peer. */
1480 if (ns.conn >= C_CONNECTED &&
1481 ((os.aftr_isp != ns.aftr_isp) ||
1482 (os.user_isp != ns.user_isp)))
1483 drbd_send_state(mdev);
1484
1485 /* In case one of the isp bits got set, suspend other devices. */
1486 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1487 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1488 suspend_other_sg(mdev);
1489
1490	/* Make sure the peer gets informed about possible state
1491 changes (ISP bits) while we were in WFReportParams. */
1492 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1493 drbd_send_state(mdev);
1494
Philipp Reisner67531712010-10-27 12:21:30 +02001495 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1496 drbd_send_state(mdev);
1497
Philipp Reisnerb411b362009-09-25 16:07:19 -07001498	/* We are in the process of starting a full sync... */
1499 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1500 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1501 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1502
1503	/* We are invalidating ourselves... */
1504 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1505 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1506 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1507
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001508 /* first half of local IO error, failure to attach,
1509 * or administrative detach */
1510 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1511 enum drbd_io_error_p eh;
1512 int was_io_error;
1513 /* corresponding get_ldev was in __drbd_set_state, to serialize
1514 * our cleanup here with the transition to D_DISKLESS,
1515	 * so it is safe to dereference ldev here. */
1516 eh = mdev->ldev->dc.on_io_error;
1517 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1518
1519 /* current state still has to be D_FAILED,
1520 * there is only one way out: to D_DISKLESS,
1521 * and that may only happen after our put_ldev below. */
1522 if (mdev->state.disk != D_FAILED)
1523 dev_err(DEV,
1524 "ASSERT FAILED: disk is %s during detach\n",
1525 drbd_disk_str(mdev->state.disk));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001526
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001527 if (drbd_send_state(mdev))
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001528 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001529 else
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001530 dev_err(DEV, "Sending state for detaching disk failed\n");
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001531
1532 drbd_rs_cancel_all(mdev);
1533
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001534 /* In case we want to get something to stable storage still,
1535 * this may be the last chance.
1536 * Following put_ldev may transition to D_DISKLESS. */
1537 drbd_md_sync(mdev);
1538 put_ldev(mdev);
1539
1540 if (was_io_error && eh == EP_CALL_HELPER)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001541 drbd_khelper(mdev, "local-io-error");
1542 }
1543
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001544 /* second half of local IO error, failure to attach,
1545 * or administrative detach,
1546 * after local_cnt references have reached zero again */
1547 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1548 /* We must still be diskless,
1549 * re-attach has to be serialized with this! */
1550 if (mdev->state.disk != D_DISKLESS)
1551 dev_err(DEV,
1552 "ASSERT FAILED: disk is %s while going diskless\n",
1553 drbd_disk_str(mdev->state.disk));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001554
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001555 mdev->rs_total = 0;
1556 mdev->rs_failed = 0;
1557 atomic_set(&mdev->rs_pending_cnt, 0);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001558
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001559 if (drbd_send_state(mdev))
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001560 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001561 else
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02001562 dev_err(DEV, "Sending state for being diskless failed\n");
1563 /* corresponding get_ldev in __drbd_set_state
1564	 * this may finally trigger drbd_ldev_destroy. */
1565 put_ldev(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001566 }
1567
1568 /* Disks got bigger while they were detached */
1569 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1570 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1571 if (ns.conn == C_CONNECTED)
1572 resync_after_online_grow(mdev);
1573 }
1574
1575 /* A resync finished or aborted, wake paused devices... */
1576 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1577 (os.peer_isp && !ns.peer_isp) ||
1578 (os.user_isp && !ns.user_isp))
1579 resume_next_sg(mdev);
1580
Lars Ellenbergaf85e8e2010-10-07 16:07:55 +02001581 /* sync target done with resync. Explicitly notify peer, even though
1582 * it should (at least for non-empty resyncs) already know itself. */
1583 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1584 drbd_send_state(mdev);
1585
Lars Ellenberg06d33e92010-12-18 17:00:59 +01001586 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED)
Lars Ellenberg02851e92010-12-16 14:47:39 +01001587 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
1588
Philipp Reisnerf70b35112010-06-24 14:34:40 +02001589	/* free tl_hash if we got thawed and are C_STANDALONE */
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001590 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
Philipp Reisnerf70b35112010-06-24 14:34:40 +02001591 drbd_free_tl_hash(mdev);
1592
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593 /* Upon network connection, we need to start the receiver */
1594 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1595 drbd_thread_start(&mdev->receiver);
1596
1597 /* Terminate worker thread if we are unconfigured - it will be
1598 restarted as needed... */
1599 if (ns.disk == D_DISKLESS &&
1600 ns.conn == C_STANDALONE &&
1601 ns.role == R_SECONDARY) {
1602 if (os.aftr_isp != ns.aftr_isp)
1603 resume_next_sg(mdev);
1604 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1605 if (test_bit(DEVICE_DYING, &mdev->flags))
1606 drbd_thread_stop_nowait(&mdev->worker);
1607 }
1608
1609 drbd_md_sync(mdev);
1610}
1611
1612
1613static int drbd_thread_setup(void *arg)
1614{
1615 struct drbd_thread *thi = (struct drbd_thread *) arg;
1616 struct drbd_conf *mdev = thi->mdev;
1617 unsigned long flags;
1618 int retval;
1619
1620restart:
1621 retval = thi->function(thi);
1622
1623 spin_lock_irqsave(&thi->t_lock, flags);
1624
1625 /* if the receiver has been "Exiting", the last thing it did
1626 * was set the conn state to "StandAlone",
1627 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1628 * and receiver thread will be "started".
1629 * drbd_thread_start needs to set "Restarting" in that case.
1630 * t_state check and assignment needs to be within the same spinlock,
1631 * so either thread_start sees Exiting, and can remap to Restarting,
1632	 * or thread_start sees None, and can proceed as normal.
1633 */
1634
1635 if (thi->t_state == Restarting) {
1636 dev_info(DEV, "Restarting %s\n", current->comm);
1637 thi->t_state = Running;
1638 spin_unlock_irqrestore(&thi->t_lock, flags);
1639 goto restart;
1640 }
1641
1642 thi->task = NULL;
1643 thi->t_state = None;
1644 smp_mb();
1645 complete(&thi->stop);
1646 spin_unlock_irqrestore(&thi->t_lock, flags);
1647
1648 dev_info(DEV, "Terminating %s\n", current->comm);
1649
1650 /* Release mod reference taken when thread was started */
1651 module_put(THIS_MODULE);
1652 return retval;
1653}
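
/* Added overview (derived from the code, not an authoritative spec): the
 * thread t_state values form a small state machine, driven by
 * drbd_thread_setup() above and drbd_thread_start()/_drbd_thread_stop()
 * below:
 *
 *	None       -> Running     drbd_thread_start()
 *	Running    -> Exiting     _drbd_thread_stop(restart = 0)
 *	Running    -> Restarting  _drbd_thread_stop(restart = 1)
 *	Exiting    -> Restarting  drbd_thread_start() while still exiting
 *	Restarting -> Running     drbd_thread_setup(), via "goto restart"
 *	Running    -> None        thread function returned, no restart wanted
 *
 * All transitions happen under thi->t_lock, which is why the check and the
 * assignment in drbd_thread_setup() sit in the same critical section. */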
1654
1655static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1656 int (*func) (struct drbd_thread *))
1657{
1658 spin_lock_init(&thi->t_lock);
1659 thi->task = NULL;
1660 thi->t_state = None;
1661 thi->function = func;
1662 thi->mdev = mdev;
1663}
1664
1665int drbd_thread_start(struct drbd_thread *thi)
1666{
1667 struct drbd_conf *mdev = thi->mdev;
1668 struct task_struct *nt;
1669 unsigned long flags;
1670
1671 const char *me =
1672 thi == &mdev->receiver ? "receiver" :
1673 thi == &mdev->asender ? "asender" :
1674 thi == &mdev->worker ? "worker" : "NONSENSE";
1675
1676 /* is used from state engine doing drbd_thread_stop_nowait,
1677 * while holding the req lock irqsave */
1678 spin_lock_irqsave(&thi->t_lock, flags);
1679
1680 switch (thi->t_state) {
1681 case None:
1682 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1683 me, current->comm, current->pid);
1684
1685 /* Get ref on module for thread - this is released when thread exits */
1686 if (!try_module_get(THIS_MODULE)) {
1687 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1688 spin_unlock_irqrestore(&thi->t_lock, flags);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001689 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001690 }
1691
1692 init_completion(&thi->stop);
1693 D_ASSERT(thi->task == NULL);
1694 thi->reset_cpu_mask = 1;
1695 thi->t_state = Running;
1696 spin_unlock_irqrestore(&thi->t_lock, flags);
1697 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1698
1699 nt = kthread_create(drbd_thread_setup, (void *) thi,
1700 "drbd%d_%s", mdev_to_minor(mdev), me);
1701
1702 if (IS_ERR(nt)) {
1703 dev_err(DEV, "Couldn't start thread\n");
1704
1705 module_put(THIS_MODULE);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001706 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001707 }
1708 spin_lock_irqsave(&thi->t_lock, flags);
1709 thi->task = nt;
1710 thi->t_state = Running;
1711 spin_unlock_irqrestore(&thi->t_lock, flags);
1712 wake_up_process(nt);
1713 break;
1714 case Exiting:
1715 thi->t_state = Restarting;
1716 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1717 me, current->comm, current->pid);
1718 /* fall through */
1719 case Running:
1720 case Restarting:
1721 default:
1722 spin_unlock_irqrestore(&thi->t_lock, flags);
1723 break;
1724 }
1725
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001726 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001727}
1728
1729
1730void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1731{
1732 unsigned long flags;
1733
1734 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1735
1736 /* may be called from state engine, holding the req lock irqsave */
1737 spin_lock_irqsave(&thi->t_lock, flags);
1738
1739 if (thi->t_state == None) {
1740 spin_unlock_irqrestore(&thi->t_lock, flags);
1741 if (restart)
1742 drbd_thread_start(thi);
1743 return;
1744 }
1745
1746 if (thi->t_state != ns) {
1747 if (thi->task == NULL) {
1748 spin_unlock_irqrestore(&thi->t_lock, flags);
1749 return;
1750 }
1751
1752 thi->t_state = ns;
1753 smp_mb();
1754 init_completion(&thi->stop);
1755 if (thi->task != current)
1756 force_sig(DRBD_SIGKILL, thi->task);
1757
1758 }
1759
1760 spin_unlock_irqrestore(&thi->t_lock, flags);
1761
1762 if (wait)
1763 wait_for_completion(&thi->stop);
1764}
1765
1766#ifdef CONFIG_SMP
1767/**
1768 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1769 * @mdev: DRBD device.
1770 *
1771 * Forces all threads of a device onto the same CPU. This is beneficial for
1772 * DRBD's performance. May be overwritten by user's configuration.
1773 */
1774void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1775{
1776 int ord, cpu;
1777
1778 /* user override. */
1779 if (cpumask_weight(mdev->cpu_mask))
1780 return;
1781
1782 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1783 for_each_online_cpu(cpu) {
1784 if (ord-- == 0) {
1785 cpumask_set_cpu(cpu, mdev->cpu_mask);
1786 return;
1787 }
1788 }
1789 /* should not be reached */
1790 cpumask_setall(mdev->cpu_mask);
1791}
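
/* Worked example (illustrative only): with 4 CPUs online and no user
 * supplied cpu_mask, minors 0..5 get pinned to CPUs 0, 1, 2, 3, 0, 1 --
 * a plain round robin of mdev_to_minor(mdev) modulo the number of online
 * CPUs, so different devices spread their threads across cores while each
 * device keeps its receiver, worker and asender together. */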
1792
1793/**
1794 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1795 * @mdev: DRBD device.
1796 *
1797 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1798 * prematurely.
1799 */
1800void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1801{
1802 struct task_struct *p = current;
1803 struct drbd_thread *thi =
1804 p == mdev->asender.task ? &mdev->asender :
1805 p == mdev->receiver.task ? &mdev->receiver :
1806 p == mdev->worker.task ? &mdev->worker :
1807 NULL;
1808 ERR_IF(thi == NULL)
1809 return;
1810 if (!thi->reset_cpu_mask)
1811 return;
1812 thi->reset_cpu_mask = 0;
1813 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1814}
1815#endif
1816
1817/* the appropriate socket mutex must be held already */
1818int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001819 enum drbd_packets cmd, struct p_header80 *h,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001820 size_t size, unsigned msg_flags)
1821{
1822 int sent, ok;
1823
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001824 ERR_IF(!h) return false;
1825 ERR_IF(!size) return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001826
1827 h->magic = BE_DRBD_MAGIC;
1828 h->command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02001829 h->length = cpu_to_be16(size-sizeof(struct p_header80));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001830
Philipp Reisnerb411b362009-09-25 16:07:19 -07001831 sent = drbd_send(mdev, sock, h, size, msg_flags);
1832
1833 ok = (sent == size);
1834 if (!ok)
1835 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1836 cmdname(cmd), (int)size, sent);
1837 return ok;
1838}
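
/* Added sketch of the header filled in above (all fields big endian; the
 * field widths are assumed from the conversions used above, the struct
 * definition itself is not part of this excerpt):
 *
 *	u32 magic;	BE_DRBD_MAGIC
 *	u16 command;	enum drbd_packets value
 *	u16 length;	payload size, i.e. size - sizeof(struct p_header80)
 *	u8  payload[];	command specific data follows
 */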
1839
1840/* don't pass the socket. we may only look at it
1841 * when we hold the appropriate socket mutex.
1842 */
1843int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001844 enum drbd_packets cmd, struct p_header80 *h, size_t size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001845{
1846 int ok = 0;
1847 struct socket *sock;
1848
1849 if (use_data_socket) {
1850 mutex_lock(&mdev->data.mutex);
1851 sock = mdev->data.socket;
1852 } else {
1853 mutex_lock(&mdev->meta.mutex);
1854 sock = mdev->meta.socket;
1855 }
1856
1857 /* drbd_disconnect() could have called drbd_free_sock()
1858 * while we were waiting in down()... */
1859 if (likely(sock != NULL))
1860 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1861
1862 if (use_data_socket)
1863 mutex_unlock(&mdev->data.mutex);
1864 else
1865 mutex_unlock(&mdev->meta.mutex);
1866 return ok;
1867}
1868
1869int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1870 size_t size)
1871{
Philipp Reisner0b70a132010-08-20 13:36:10 +02001872 struct p_header80 h;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001873 int ok;
1874
1875 h.magic = BE_DRBD_MAGIC;
1876 h.command = cpu_to_be16(cmd);
1877 h.length = cpu_to_be16(size);
1878
1879 if (!drbd_get_data_sock(mdev))
1880 return 0;
1881
Philipp Reisnerb411b362009-09-25 16:07:19 -07001882 ok = (sizeof(h) ==
1883 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1884 ok = ok && (size ==
1885 drbd_send(mdev, mdev->data.socket, data, size, 0));
1886
1887 drbd_put_data_sock(mdev);
1888
1889 return ok;
1890}
1891
1892int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1893{
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001894 struct p_rs_param_95 *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001895 struct socket *sock;
1896 int size, rv;
1897 const int apv = mdev->agreed_pro_version;
1898
1899 size = apv <= 87 ? sizeof(struct p_rs_param)
1900 : apv == 88 ? sizeof(struct p_rs_param)
1901 + strlen(mdev->sync_conf.verify_alg) + 1
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001902 : apv <= 94 ? sizeof(struct p_rs_param_89)
1903 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001904
1905 /* used from admin command context and receiver/worker context.
1906 * to avoid kmalloc, grab the socket right here,
1907 * then use the pre-allocated sbuf there */
1908 mutex_lock(&mdev->data.mutex);
1909 sock = mdev->data.socket;
1910
1911 if (likely(sock != NULL)) {
1912 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1913
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001914 p = &mdev->data.sbuf.rs_param_95;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001915
1916 /* initialize verify_alg and csums_alg */
1917 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1918
1919 p->rate = cpu_to_be32(sc->rate);
Philipp Reisner8e26f9c2010-07-06 17:25:54 +02001920 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1921 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1922 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1923 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001924
1925 if (apv >= 88)
1926 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1927 if (apv >= 89)
1928 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1929
1930 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1931 } else
1932 rv = 0; /* not ok */
1933
1934 mutex_unlock(&mdev->data.mutex);
1935
1936 return rv;
1937}
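
/* Added note (summarizing the size selection above, not a protocol spec):
 *	apv <= 87: struct p_rs_param            -- resync rate only
 *	apv == 88: p_rs_param + verify_alg name appended as a string
 *	apv 89-94: struct p_rs_param_89         -- verify_alg and csums_alg fields
 *	apv >= 95: struct p_rs_param_95         -- adds the c_* resync controller knobs
 */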
1938
1939int drbd_send_protocol(struct drbd_conf *mdev)
1940{
1941 struct p_protocol *p;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001942 int size, cf, rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001943
1944 size = sizeof(struct p_protocol);
1945
1946 if (mdev->agreed_pro_version >= 87)
1947 size += strlen(mdev->net_conf->integrity_alg) + 1;
1948
1949 /* we must not recurse into our own queue,
1950 * as that is blocked during handshake */
1951 p = kmalloc(size, GFP_NOIO);
1952 if (p == NULL)
1953 return 0;
1954
1955 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1956 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1957 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1958 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001959 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1960
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001961 cf = 0;
1962 if (mdev->net_conf->want_lose)
1963 cf |= CF_WANT_LOSE;
1964 if (mdev->net_conf->dry_run) {
1965 if (mdev->agreed_pro_version >= 92)
1966 cf |= CF_DRY_RUN;
1967 else {
1968			dev_err(DEV, "--dry-run is not supported by peer\n");
Dan Carpenter7ac314c2010-04-22 14:27:23 +02001969 kfree(p);
Philipp Reisner148efa12011-01-15 00:21:15 +01001970 return -1;
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01001971 }
1972 }
1973 p->conn_flags = cpu_to_be32(cf);
1974
Philipp Reisnerb411b362009-09-25 16:07:19 -07001975 if (mdev->agreed_pro_version >= 87)
1976 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1977
1978 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
Philipp Reisner0b70a132010-08-20 13:36:10 +02001979 (struct p_header80 *)p, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001980 kfree(p);
1981 return rv;
1982}
1983
1984int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1985{
1986 struct p_uuids p;
1987 int i;
1988
1989 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1990 return 1;
1991
1992 for (i = UI_CURRENT; i < UI_SIZE; i++)
1993 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1994
1995 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
1996 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
1997 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
1998 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1999 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2000 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2001
2002 put_ldev(mdev);
2003
2004 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002005 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002006}
2007
2008int drbd_send_uuids(struct drbd_conf *mdev)
2009{
2010 return _drbd_send_uuids(mdev, 0);
2011}
2012
2013int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2014{
2015 return _drbd_send_uuids(mdev, 8);
2016}
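
/* For reference (collected from _drbd_send_uuids() above, added comment):
 * the uuid_flags bits currently sent are
 *	1 -- net_conf->want_lose is set ("discard my data" on connect)
 *	2 -- this node was a crashed primary
 *	4 -- new_state_tmp.disk == D_INCONSISTENT
 *	8 -- skip initial sync, passed by drbd_send_uuids_skip_initial_sync()
 */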
2017
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002018int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002019{
2020 struct p_rs_uuid p;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002021 u64 uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002022
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002023 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2024
Philipp Reisner4a23f262011-01-11 17:42:17 +01002025 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01002026 drbd_uuid_set(mdev, UI_BITMAP, uuid);
2027 drbd_md_sync(mdev);
2028 p.uuid = cpu_to_be64(uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002029
2030 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002031 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002032}
2033
Philipp Reisnere89b5912010-03-24 17:11:33 +01002034int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002035{
2036 struct p_sizes p;
2037 sector_t d_size, u_size;
2038 int q_order_type;
2039 int ok;
2040
2041 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2042 D_ASSERT(mdev->ldev->backing_bdev);
2043 d_size = drbd_get_max_capacity(mdev->ldev);
2044 u_size = mdev->ldev->dc.disk_size;
2045 q_order_type = drbd_queue_order_type(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002046 put_ldev(mdev);
2047 } else {
2048 d_size = 0;
2049 u_size = 0;
2050 q_order_type = QUEUE_ORDERED_NONE;
2051 }
2052
2053 p.d_size = cpu_to_be64(d_size);
2054 p.u_size = cpu_to_be64(u_size);
2055 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002056 p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
Philipp Reisnere89b5912010-03-24 17:11:33 +01002057 p.queue_order_type = cpu_to_be16(q_order_type);
2058 p.dds_flags = cpu_to_be16(flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002059
2060 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002061 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002062 return ok;
2063}
2064
2065/**
2066 * drbd_send_state() - Sends the drbd state to the peer
2067 * @mdev: DRBD device.
2068 */
2069int drbd_send_state(struct drbd_conf *mdev)
2070{
2071 struct socket *sock;
2072 struct p_state p;
2073 int ok = 0;
2074
2075	/* Grab state lock so we won't send state if we're in the middle
2076 * of a cluster wide state change on another thread */
2077 drbd_state_lock(mdev);
2078
2079 mutex_lock(&mdev->data.mutex);
2080
2081 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2082 sock = mdev->data.socket;
2083
2084 if (likely(sock != NULL)) {
2085 ok = _drbd_send_cmd(mdev, sock, P_STATE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002086 (struct p_header80 *)&p, sizeof(p), 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002087 }
2088
2089 mutex_unlock(&mdev->data.mutex);
2090
2091 drbd_state_unlock(mdev);
2092 return ok;
2093}
2094
2095int drbd_send_state_req(struct drbd_conf *mdev,
2096 union drbd_state mask, union drbd_state val)
2097{
2098 struct p_req_state p;
2099
2100 p.mask = cpu_to_be32(mask.i);
2101 p.val = cpu_to_be32(val.i);
2102
2103 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002104 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002105}
2106
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01002107int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002108{
2109 struct p_req_state_reply p;
2110
2111 p.retcode = cpu_to_be32(retcode);
2112
2113 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002114 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002115}
2116
2117int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2118 struct p_compressed_bm *p,
2119 struct bm_xfer_ctx *c)
2120{
2121 struct bitstream bs;
2122 unsigned long plain_bits;
2123 unsigned long tmp;
2124 unsigned long rl;
2125 unsigned len;
2126 unsigned toggle;
2127 int bits;
2128
2129 /* may we use this feature? */
2130 if ((mdev->sync_conf.use_rle == 0) ||
2131 (mdev->agreed_pro_version < 90))
2132 return 0;
2133
2134 if (c->bit_offset >= c->bm_bits)
2135 return 0; /* nothing to do. */
2136
2137	/* use at most this many bytes */
2138 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2139 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2140 /* plain bits covered in this code string */
2141 plain_bits = 0;
2142
2143 /* p->encoding & 0x80 stores whether the first run length is set.
2144 * bit offset is implicit.
2145 * start with toggle == 2 to be able to tell the first iteration */
2146 toggle = 2;
2147
2148	/* see how many plain bits we can stuff into one packet
2149 * using RLE and VLI. */
2150 do {
2151 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2152 : _drbd_bm_find_next(mdev, c->bit_offset);
2153 if (tmp == -1UL)
2154 tmp = c->bm_bits;
2155 rl = tmp - c->bit_offset;
2156
2157 if (toggle == 2) { /* first iteration */
2158 if (rl == 0) {
2159 /* the first checked bit was set,
2160 * store start value, */
2161 DCBP_set_start(p, 1);
2162 /* but skip encoding of zero run length */
2163 toggle = !toggle;
2164 continue;
2165 }
2166 DCBP_set_start(p, 0);
2167 }
2168
2169 /* paranoia: catch zero runlength.
2170 * can only happen if bitmap is modified while we scan it. */
2171 if (rl == 0) {
2172 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2173 "t:%u bo:%lu\n", toggle, c->bit_offset);
2174 return -1;
2175 }
2176
2177 bits = vli_encode_bits(&bs, rl);
2178 if (bits == -ENOBUFS) /* buffer full */
2179 break;
2180 if (bits <= 0) {
2181 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2182 return 0;
2183 }
2184
2185 toggle = !toggle;
2186 plain_bits += rl;
2187 c->bit_offset = tmp;
2188 } while (c->bit_offset < c->bm_bits);
2189
2190 len = bs.cur.b - p->code + !!bs.cur.bit;
2191
2192 if (plain_bits < (len << 3)) {
2193 /* incompressible with this method.
2194 * we need to rewind both word and bit position. */
2195 c->bit_offset -= plain_bits;
2196 bm_xfer_ctx_bit_to_word_offset(c);
2197 c->bit_offset = c->word_offset * BITS_PER_LONG;
2198 return 0;
2199 }
2200
2201 /* RLE + VLI was able to compress it just fine.
2202 * update c->word_offset. */
2203 bm_xfer_ctx_bit_to_word_offset(c);
2204
2205 /* store pad_bits */
2206 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2207
2208 return len;
2209}
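
/* Worked example (illustrative only, not part of the wire format code):
 * suppose the relevant part of the bitmap starts with
 *
 *	0 0 0 1 1 1 1 0 0 ...
 *
 * The first checked bit is clear, so DCBP_set_start(p, 0) is recorded and
 * the run lengths 3 (clear), 4 (set), 2 (clear), ... are VLI encoded into
 * p->code one after another; the receiver toggles the bit value after each
 * decoded run, starting from the stored start value.  If the encoded runs
 * take more bits than the plain bits they cover (plain_bits < len * 8),
 * the chunk is reported as incompressible and the caller falls back to
 * sending plain bitmap words. */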
2210
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002211/**
2212 * send_bitmap_rle_or_plain
2213 *
2214 * Return 0 when done, 1 when another iteration is needed, and a negative error
2215 * code upon failure.
2216 */
2217static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07002218send_bitmap_rle_or_plain(struct drbd_conf *mdev,
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002219 struct p_header80 *h, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002220{
2221 struct p_compressed_bm *p = (void*)h;
2222 unsigned long num_words;
2223 int len;
2224 int ok;
2225
2226 len = fill_bitmap_rle_bits(mdev, p, c);
2227
2228 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002229 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002230
2231 if (len) {
2232 DCBP_set_code(p, RLE_VLI_Bits);
2233 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2234 sizeof(*p) + len, 0);
2235
2236 c->packets[0]++;
2237 c->bytes[0] += sizeof(*p) + len;
2238
2239 if (c->bit_offset >= c->bm_bits)
2240 len = 0; /* DONE */
2241 } else {
2242 /* was not compressible.
2243 * send a buffer full of plain text bits instead. */
2244 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2245 len = num_words * sizeof(long);
2246 if (len)
2247 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2248 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002249 h, sizeof(struct p_header80) + len, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002250 c->word_offset += num_words;
2251 c->bit_offset = c->word_offset * BITS_PER_LONG;
2252
2253 c->packets[1]++;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002254 c->bytes[1] += sizeof(struct p_header80) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002255
2256 if (c->bit_offset > c->bm_bits)
2257 c->bit_offset = c->bm_bits;
2258 }
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002259 if (ok) {
2260 if (len == 0) {
2261 INFO_bm_xfer_stats(mdev, "send", c);
2262 return 0;
2263 } else
2264 return 1;
2265 }
2266 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002267}
2268
2269/* See the comment at receive_bitmap() */
2270int _drbd_send_bitmap(struct drbd_conf *mdev)
2271{
2272 struct bm_xfer_ctx c;
Philipp Reisner0b70a132010-08-20 13:36:10 +02002273 struct p_header80 *p;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002274 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002275
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002276 ERR_IF(!mdev->bitmap) return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002277
2278 /* maybe we should use some per thread scratch page,
2279 * and allocate that during initial device creation? */
Philipp Reisner0b70a132010-08-20 13:36:10 +02002280 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002281 if (!p) {
2282 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002283 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002284 }
2285
2286 if (get_ldev(mdev)) {
2287 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2288 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2289 drbd_bm_set_all(mdev);
2290 if (drbd_bm_write(mdev)) {
2291 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2292 * but otherwise process as per normal - need to tell other
2293 * side that a full resync is required! */
2294 dev_err(DEV, "Failed to write bitmap to disk!\n");
2295 } else {
2296 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2297 drbd_md_sync(mdev);
2298 }
2299 }
2300 put_ldev(mdev);
2301 }
2302
2303 c = (struct bm_xfer_ctx) {
2304 .bm_bits = drbd_bm_bits(mdev),
2305 .bm_words = drbd_bm_words(mdev),
2306 };
2307
2308 do {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002309 err = send_bitmap_rle_or_plain(mdev, p, &c);
2310 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002311
2312 free_page((unsigned long) p);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01002313 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002314}
2315
2316int drbd_send_bitmap(struct drbd_conf *mdev)
2317{
2318 int err;
2319
2320 if (!drbd_get_data_sock(mdev))
2321 return -1;
2322 err = !_drbd_send_bitmap(mdev);
2323 drbd_put_data_sock(mdev);
2324 return err;
2325}
2326
2327int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2328{
2329 int ok;
2330 struct p_barrier_ack p;
2331
2332 p.barrier = barrier_nr;
2333 p.set_size = cpu_to_be32(set_size);
2334
2335 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002336 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002337 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002338 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002339 return ok;
2340}
2341
2342/**
2343 * _drbd_send_ack() - Sends an ack packet
2344 * @mdev: DRBD device.
2345 * @cmd: Packet command code.
2346 * @sector: sector, needs to be in big endian byte order
2347 * @blksize: size in byte, needs to be in big endian byte order
2348 * @block_id: Id, big endian byte order
2349 */
2350static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2351 u64 sector,
2352 u32 blksize,
2353 u64 block_id)
2354{
2355 int ok;
2356 struct p_block_ack p;
2357
2358 p.sector = sector;
2359 p.block_id = block_id;
2360 p.blksize = blksize;
2361 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2362
2363 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002364 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002365 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002366 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002367 return ok;
2368}
2369
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002370/* dp->sector and dp->block_id already/still in network byte order,
2371 * data_size is payload size according to dp->head,
2372 * and may need to be corrected for digest size. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002373int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002374 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002375{
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02002376 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2377 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002378 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2379 dp->block_id);
2380}
2381
2382int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2383 struct p_block_req *rp)
2384{
2385 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2386}
2387
2388/**
2389 * drbd_send_ack() - Sends an ack packet
2390 * @mdev: DRBD device.
2391 * @cmd: Packet command code.
2392 * @e: Epoch entry.
2393 */
2394int drbd_send_ack(struct drbd_conf *mdev,
2395 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2396{
2397 return _drbd_send_ack(mdev, cmd,
2398 cpu_to_be64(e->sector),
2399 cpu_to_be32(e->size),
2400 e->block_id);
2401}
2402
2403/* This function misuses the block_id field to signal if the blocks
2404 * are in sync or not. */
2405int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2406 sector_t sector, int blksize, u64 block_id)
2407{
2408 return _drbd_send_ack(mdev, cmd,
2409 cpu_to_be64(sector),
2410 cpu_to_be32(blksize),
2411 cpu_to_be64(block_id));
2412}
2413
2414int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2415 sector_t sector, int size, u64 block_id)
2416{
2417 int ok;
2418 struct p_block_req p;
2419
2420 p.sector = cpu_to_be64(sector);
2421 p.block_id = block_id;
2422 p.blksize = cpu_to_be32(size);
2423
2424 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002425 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002426 return ok;
2427}
2428
2429int drbd_send_drequest_csum(struct drbd_conf *mdev,
2430 sector_t sector, int size,
2431 void *digest, int digest_size,
2432 enum drbd_packets cmd)
2433{
2434 int ok;
2435 struct p_block_req p;
2436
2437 p.sector = cpu_to_be64(sector);
2438 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2439 p.blksize = cpu_to_be32(size);
2440
2441 p.head.magic = BE_DRBD_MAGIC;
2442 p.head.command = cpu_to_be16(cmd);
Philipp Reisner0b70a132010-08-20 13:36:10 +02002443 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002444
2445 mutex_lock(&mdev->data.mutex);
2446
2447 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2448 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2449
2450 mutex_unlock(&mdev->data.mutex);
2451
2452 return ok;
2453}
2454
2455int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2456{
2457 int ok;
2458 struct p_block_req p;
2459
2460 p.sector = cpu_to_be64(sector);
2461 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2462 p.blksize = cpu_to_be32(size);
2463
2464 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
Philipp Reisner0b70a132010-08-20 13:36:10 +02002465 (struct p_header80 *)&p, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002466 return ok;
2467}
2468
2469/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002470 * returns false if we should retry,
2471 * true if we think connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07002472 */
2473static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2474{
2475 int drop_it;
2476 /* long elapsed = (long)(jiffies - mdev->last_received); */
2477
2478 drop_it = mdev->meta.socket == sock
2479 || !mdev->asender.task
2480 || get_t_state(&mdev->asender) != Running
2481 || mdev->state.conn < C_CONNECTED;
2482
2483 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002484 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002485
2486 drop_it = !--mdev->ko_count;
2487 if (!drop_it) {
2488 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2489 current->comm, current->pid, mdev->ko_count);
2490 request_ping(mdev);
2491 }
2492
2493 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2494}
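
/* Added behaviour sketch (derived from the code above, not new policy):
 * every send timeout ends up here; we give up immediately if the asender
 * is gone or the connection is no longer established, otherwise ko_count
 * is decremented and a ping is requested, and only when ko_count hits
 * zero do we declare the connection dead. */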
2495
2496/* The idea of sendpage seems to be to put some kind of reference
2497 * to the page into the skb, and to hand it over to the NIC. In
2498 * this process get_page() gets called.
2499 *
2500 * As soon as the page was really sent over the network put_page()
2501 * gets called by some part of the network layer. [ NIC driver? ]
2502 *
2503 * [ get_page() / put_page() increment/decrement the count. If count
2504 * reaches 0 the page will be freed. ]
2505 *
2506 * This works nicely with pages from FSs.
2507 * But this means that in protocol A we might signal IO completion too early!
2508 *
2509 * In order not to corrupt data during a resync we must make sure
2510 * that we do not reuse our own buffer pages (EEs) too early, therefore
2511 * we have the net_ee list.
2512 *
2513 * XFS seems to have problems, still, it submits pages with page_count == 0!
2514 * As a workaround, we disable sendpage on pages
2515 * with page_count == 0 or PageSlab.
2516 */
2517static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002518 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002519{
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002520 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002521 kunmap(page);
2522 if (sent == size)
2523 mdev->send_cnt += size>>9;
2524 return sent == size;
2525}
2526
2527static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002528 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002529{
2530 mm_segment_t oldfs = get_fs();
2531 int sent, ok;
2532 int len = size;
2533
2534 /* e.g. XFS meta- & log-data is in slab pages, which have a
2535 * page_count of 0 and/or have PageSlab() set.
2536 * we cannot use send_page for those, as that does get_page();
2537 * put_page(); and would cause either a VM_BUG directly, or
2538 * __page_cache_release a page that would actually still be referenced
2539 * by someone, leading to some obscure delayed Oops somewhere else. */
2540 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002541 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002542
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002543 msg_flags |= MSG_NOSIGNAL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002544 drbd_update_congested(mdev);
2545 set_fs(KERNEL_DS);
2546 do {
2547 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2548 offset, len,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002549 msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002550 if (sent == -EAGAIN) {
2551 if (we_should_drop_the_connection(mdev,
2552 mdev->data.socket))
2553 break;
2554 else
2555 continue;
2556 }
2557 if (sent <= 0) {
2558 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2559 __func__, (int)size, len, sent);
2560 break;
2561 }
2562 len -= sent;
2563 offset += sent;
2564 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2565 set_fs(oldfs);
2566 clear_bit(NET_CONGESTED, &mdev->flags);
2567
2568 ok = (len == 0);
2569 if (likely(ok))
2570 mdev->send_cnt += size>>9;
2571 return ok;
2572}
2573
2574static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2575{
2576 struct bio_vec *bvec;
2577 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002578 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002579 __bio_for_each_segment(bvec, bio, i, 0) {
2580 if (!_drbd_no_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002581 bvec->bv_offset, bvec->bv_len,
2582 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002583 return 0;
2584 }
2585 return 1;
2586}
2587
2588static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2589{
2590 struct bio_vec *bvec;
2591 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002592 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002593 __bio_for_each_segment(bvec, bio, i, 0) {
2594 if (!_drbd_send_page(mdev, bvec->bv_page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002595 bvec->bv_offset, bvec->bv_len,
2596 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002597 return 0;
2598 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002599 return 1;
2600}
2601
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002602static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2603{
2604 struct page *page = e->pages;
2605 unsigned len = e->size;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002606 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002607 page_chain_for_each(page) {
2608 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002609 if (!_drbd_send_page(mdev, page, 0, l,
2610 page_chain_next(page) ? MSG_MORE : 0))
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002611 return 0;
2612 len -= l;
2613 }
2614 return 1;
2615}
2616
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002617static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2618{
2619 if (mdev->agreed_pro_version >= 95)
2620 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002621 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2622 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2623 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2624 else
Jens Axboe721a9602011-03-09 11:56:30 +01002625 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002626}
2627
Philipp Reisnerb411b362009-09-25 16:07:19 -07002628/* Used to send write requests
2629 * R_PRIMARY -> Peer (P_DATA)
2630 */
2631int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2632{
2633 int ok = 1;
2634 struct p_data p;
2635 unsigned int dp_flags = 0;
2636 void *dgb;
2637 int dgs;
2638
2639 if (!drbd_get_data_sock(mdev))
2640 return 0;
2641
2642 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2643 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2644
Philipp Reisnerd5373382010-08-23 15:18:33 +02002645 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
Philipp Reisner0b70a132010-08-20 13:36:10 +02002646 p.head.h80.magic = BE_DRBD_MAGIC;
2647 p.head.h80.command = cpu_to_be16(P_DATA);
2648 p.head.h80.length =
2649 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2650 } else {
2651 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2652 p.head.h95.command = cpu_to_be16(P_DATA);
2653 p.head.h95.length =
2654 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2655 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002656
2657 p.sector = cpu_to_be64(req->sector);
2658 p.block_id = (unsigned long)req;
2659 p.seq_num = cpu_to_be32(req->seq_num =
2660 atomic_add_return(1, &mdev->packet_seq));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002661
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02002662 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2663
Philipp Reisnerb411b362009-09-25 16:07:19 -07002664 if (mdev->state.conn >= C_SYNC_SOURCE &&
2665 mdev->state.conn <= C_PAUSED_SYNC_T)
2666 dp_flags |= DP_MAY_SET_IN_SYNC;
2667
2668 p.dp_flags = cpu_to_be32(dp_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002669 set_bit(UNPLUG_REMOTE, &mdev->flags);
2670 ok = (sizeof(p) ==
Lars Ellenbergba11ad92010-05-25 16:26:16 +02002671 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002672 if (ok && dgs) {
2673 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002674 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002675 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002676 }
2677 if (ok) {
Lars Ellenberg470be442010-11-10 10:36:52 +01002678 /* For protocol A, we have to memcpy the payload into
2679 * socket buffers, as we may complete right away
2680 * as soon as we handed it over to tcp, at which point the data
2681 * pages may become invalid.
2682 *
2683 * For data-integrity enabled, we copy it as well, so we can be
2684 * sure that even if the bio pages may still be modified, it
2685 * won't change the data on the wire, thus if the digest checks
2686 * out ok after sending on this side, but does not fit on the
2687 * receiving side, we sure have detected corruption elsewhere.
2688 */
2689 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002690 ok = _drbd_send_bio(mdev, req->master_bio);
2691 else
2692 ok = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01002693
2694 /* double check digest, sometimes buffers have been modified in flight. */
2695 if (dgs > 0 && dgs <= 64) {
2696			/* 64 byte, 512 bit, is the largest digest size
2697 * currently supported in kernel crypto. */
2698 unsigned char digest[64];
2699 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2700 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2701 dev_warn(DEV,
2702 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2703 (unsigned long long)req->sector, req->size);
2704 }
2705 } /* else if (dgs > 64) {
2706 ... Be noisy about digest too large ...
2707 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002708 }
2709
2710 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02002711
Philipp Reisnerb411b362009-09-25 16:07:19 -07002712 return ok;
2713}
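
/* Added note: the header variant chosen above depends only on whether the
 * request payload fits the 16 bit length field.  Requests up to
 * DRBD_MAX_SIZE_H80_PACKET are framed with the compact p_header80 and
 * BE_DRBD_MAGIC; anything larger uses p_header95 with BE_DRBD_MAGIC_BIG
 * and a 32 bit length -- drbd_send_block() below applies the same rule. */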
2714
2715/* answer packet, used to send data back for read requests:
2716 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2717 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2718 */
2719int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2720 struct drbd_epoch_entry *e)
2721{
2722 int ok;
2723 struct p_data p;
2724 void *dgb;
2725 int dgs;
2726
2727 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2728 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2729
Philipp Reisnerd5373382010-08-23 15:18:33 +02002730 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
Philipp Reisner0b70a132010-08-20 13:36:10 +02002731 p.head.h80.magic = BE_DRBD_MAGIC;
2732 p.head.h80.command = cpu_to_be16(cmd);
2733 p.head.h80.length =
2734 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2735 } else {
2736 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2737 p.head.h95.command = cpu_to_be16(cmd);
2738 p.head.h95.length =
2739 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2740 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002741
2742 p.sector = cpu_to_be64(e->sector);
2743 p.block_id = e->block_id;
2744 /* p.seq_num = 0; No sequence numbers here.. */
2745
2746 /* Only called by our kernel thread.
2747 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2748 * in response to admin command or module unload.
2749 */
2750 if (!drbd_get_data_sock(mdev))
2751 return 0;
2752
Philipp Reisner0b70a132010-08-20 13:36:10 +02002753 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002754 if (ok && dgs) {
2755 dgb = mdev->int_dig_out;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002756 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
Andreas Gruenbachercab2f742010-12-09 16:08:46 +01002757 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002758 }
2759 if (ok)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02002760 ok = _drbd_send_zc_ee(mdev, e);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002761
2762 drbd_put_data_sock(mdev);
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02002763
Philipp Reisnerb411b362009-09-25 16:07:19 -07002764 return ok;
2765}
2766
Philipp Reisner73a01a12010-10-27 14:33:00 +02002767int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2768{
2769 struct p_block_desc p;
2770
2771 p.sector = cpu_to_be64(req->sector);
2772 p.blksize = cpu_to_be32(req->size);
2773
2774 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2775}
2776
Philipp Reisnerb411b362009-09-25 16:07:19 -07002777/*
2778 drbd_send distinguishes two cases:
2779
2780 Packets sent via the data socket "sock"
2781 and packets sent via the meta data socket "msock"
2782
2783 sock msock
2784 -----------------+-------------------------+------------------------------
2785 timeout conf.timeout / 2 conf.timeout / 2
2786 timeout action send a ping via msock Abort communication
2787 and close all sockets
2788*/
2789
2790/*
2791 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2792 */
2793int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2794 void *buf, size_t size, unsigned msg_flags)
2795{
2796 struct kvec iov;
2797 struct msghdr msg;
2798 int rv, sent = 0;
2799
2800 if (!sock)
2801 return -1000;
2802
2803 /* THINK if (signal_pending) return ... ? */
2804
2805 iov.iov_base = buf;
2806 iov.iov_len = size;
2807
2808 msg.msg_name = NULL;
2809 msg.msg_namelen = 0;
2810 msg.msg_control = NULL;
2811 msg.msg_controllen = 0;
2812 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2813
2814 if (sock == mdev->data.socket) {
2815 mdev->ko_count = mdev->net_conf->ko_count;
2816 drbd_update_congested(mdev);
2817 }
2818 do {
2819 /* STRANGE
2820 * tcp_sendmsg does _not_ use its size parameter at all ?
2821 *
2822 * -EAGAIN on timeout, -EINTR on signal.
2823 */
2824/* THINK
2825 * do we need to block DRBD_SIG if sock == &meta.socket ??
2826 * otherwise wake_asender() might interrupt some send_*Ack !
2827 */
2828 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2829 if (rv == -EAGAIN) {
2830 if (we_should_drop_the_connection(mdev, sock))
2831 break;
2832 else
2833 continue;
2834 }
2835 D_ASSERT(rv != 0);
2836 if (rv == -EINTR) {
2837 flush_signals(current);
2838 rv = 0;
2839 }
2840 if (rv < 0)
2841 break;
2842 sent += rv;
2843 iov.iov_base += rv;
2844 iov.iov_len -= rv;
2845 } while (sent < size);
2846
2847 if (sock == mdev->data.socket)
2848 clear_bit(NET_CONGESTED, &mdev->flags);
2849
2850 if (rv <= 0) {
2851 if (rv != -EAGAIN) {
2852 dev_err(DEV, "%s_sendmsg returned %d\n",
2853 sock == mdev->meta.socket ? "msock" : "sock",
2854 rv);
2855 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2856 } else
2857 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2858 }
2859
2860 return sent;
2861}
2862
2863static int drbd_open(struct block_device *bdev, fmode_t mode)
2864{
2865 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2866 unsigned long flags;
2867 int rv = 0;
2868
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002869 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002870 spin_lock_irqsave(&mdev->req_lock, flags);
2871 /* to have a stable mdev->state.role
2872 * and no race with updating open_cnt */
2873
2874 if (mdev->state.role != R_PRIMARY) {
2875 if (mode & FMODE_WRITE)
2876 rv = -EROFS;
2877 else if (!allow_oos)
2878 rv = -EMEDIUMTYPE;
2879 }
2880
2881 if (!rv)
2882 mdev->open_cnt++;
2883 spin_unlock_irqrestore(&mdev->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002884 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002885
2886 return rv;
2887}
2888
2889static int drbd_release(struct gendisk *gd, fmode_t mode)
2890{
2891 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002892 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002893 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002894 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002895 return 0;
2896}
2897
Philipp Reisnerb411b362009-09-25 16:07:19 -07002898static void drbd_set_defaults(struct drbd_conf *mdev)
2899{
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002900 /* This way we get a compile error when sync_conf grows,
2901	   and we forget to initialize it here */
2902 mdev->sync_conf = (struct syncer_conf) {
2903 /* .rate = */ DRBD_RATE_DEF,
2904 /* .after = */ DRBD_AFTER_DEF,
2905 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002906 /* .verify_alg = */ {}, 0,
2907 /* .cpu_mask = */ {}, 0,
2908 /* .csums_alg = */ {}, 0,
Philipp Reisnere7564142010-06-29 17:35:34 +02002909 /* .use_rle = */ 0,
Philipp Reisner9a31d712010-07-05 13:42:03 +02002910 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2911 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2912 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2913 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002914 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2915 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
Philipp Reisner85f4cc12010-06-29 17:35:34 +02002916 };
2917
2918	/* Have to do it this way, because the layout differs between
2919 big endian and little endian */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002920 mdev->state = (union drbd_state) {
2921 { .role = R_SECONDARY,
2922 .peer = R_UNKNOWN,
2923 .conn = C_STANDALONE,
2924 .disk = D_DISKLESS,
2925 .pdsk = D_UNKNOWN,
Philipp Reisnerfb22c402010-09-08 23:20:21 +02002926 .susp = 0,
2927 .susp_nod = 0,
2928 .susp_fen = 0
Philipp Reisnerb411b362009-09-25 16:07:19 -07002929 } };
2930}
2931
2932void drbd_init_set_defaults(struct drbd_conf *mdev)
2933{
2934 /* the memset(,0,) did most of this.
2935 * note: only assignments, no allocation in here */
2936
2937 drbd_set_defaults(mdev);
2938
Philipp Reisnerb411b362009-09-25 16:07:19 -07002939 atomic_set(&mdev->ap_bio_cnt, 0);
2940 atomic_set(&mdev->ap_pending_cnt, 0);
2941 atomic_set(&mdev->rs_pending_cnt, 0);
2942 atomic_set(&mdev->unacked_cnt, 0);
2943 atomic_set(&mdev->local_cnt, 0);
2944 atomic_set(&mdev->net_cnt, 0);
2945 atomic_set(&mdev->packet_seq, 0);
2946 atomic_set(&mdev->pp_in_use, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02002947 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02002948 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002949 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02002950 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002951
2952 mutex_init(&mdev->md_io_mutex);
2953 mutex_init(&mdev->data.mutex);
2954 mutex_init(&mdev->meta.mutex);
2955 sema_init(&mdev->data.work.s, 0);
2956 sema_init(&mdev->meta.work.s, 0);
2957 mutex_init(&mdev->state_mutex);
2958
2959 spin_lock_init(&mdev->data.work.q_lock);
2960 spin_lock_init(&mdev->meta.work.q_lock);
2961
2962 spin_lock_init(&mdev->al_lock);
2963 spin_lock_init(&mdev->req_lock);
2964 spin_lock_init(&mdev->peer_seq_lock);
2965 spin_lock_init(&mdev->epoch_lock);
2966
2967 INIT_LIST_HEAD(&mdev->active_ee);
2968 INIT_LIST_HEAD(&mdev->sync_ee);
2969 INIT_LIST_HEAD(&mdev->done_ee);
2970 INIT_LIST_HEAD(&mdev->read_ee);
2971 INIT_LIST_HEAD(&mdev->net_ee);
2972 INIT_LIST_HEAD(&mdev->resync_reads);
2973 INIT_LIST_HEAD(&mdev->data.work.q);
2974 INIT_LIST_HEAD(&mdev->meta.work.q);
2975 INIT_LIST_HEAD(&mdev->resync_work.list);
2976 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002977 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002978 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02002979 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002980 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02002981
Philipp Reisner794abb72010-12-27 11:51:23 +01002982 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002983 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002984 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002985 mdev->md_sync_work.cb = w_md_sync;
2986 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01002987 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002988 init_timer(&mdev->resync_timer);
2989 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01002990 init_timer(&mdev->start_resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002991 mdev->resync_timer.function = resync_timer_fn;
2992 mdev->resync_timer.data = (unsigned long) mdev;
2993 mdev->md_sync_timer.function = md_sync_timer_fn;
2994 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01002995 mdev->start_resync_timer.function = start_resync_timer_fn;
2996 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002997
2998 init_waitqueue_head(&mdev->misc_wait);
2999 init_waitqueue_head(&mdev->state_wait);
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003000 init_waitqueue_head(&mdev->net_cnt_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003001 init_waitqueue_head(&mdev->ee_wait);
3002 init_waitqueue_head(&mdev->al_wait);
3003 init_waitqueue_head(&mdev->seq_wait);
3004
3005 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3006 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3007 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3008
3009 mdev->agreed_pro_version = PRO_VERSION_MAX;
Philipp Reisner2451fc32010-08-24 13:43:11 +02003010 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003011 mdev->resync_wenr = LC_FREE;
3012}
3013
3014void drbd_mdev_cleanup(struct drbd_conf *mdev)
3015{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003016 int i;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003017 if (mdev->receiver.t_state != None)
3018 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3019 mdev->receiver.t_state);
3020
3021 /* no need to lock it, I'm the only thread alive */
3022 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3023 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3024 mdev->al_writ_cnt =
3025 mdev->bm_writ_cnt =
3026 mdev->read_cnt =
3027 mdev->recv_cnt =
3028 mdev->send_cnt =
3029 mdev->writ_cnt =
3030 mdev->p_size =
3031 mdev->rs_start =
3032 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003033 mdev->rs_failed = 0;
3034 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02003035 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02003036 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3037 mdev->rs_mark_left[i] = 0;
3038 mdev->rs_mark_time[i] = 0;
3039 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003040 D_ASSERT(mdev->net_conf == NULL);
3041
3042 drbd_set_my_capacity(mdev, 0);
3043 if (mdev->bitmap) {
3044 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01003045 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003046 drbd_bm_cleanup(mdev);
3047 }
3048
3049 drbd_free_resources(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02003050 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003051
3052 /*
3053	 * currently we call drbd_init_ee only on module load, so
3054	 * we may call drbd_release_ee only on module unload!
3055 */
3056 D_ASSERT(list_empty(&mdev->active_ee));
3057 D_ASSERT(list_empty(&mdev->sync_ee));
3058 D_ASSERT(list_empty(&mdev->done_ee));
3059 D_ASSERT(list_empty(&mdev->read_ee));
3060 D_ASSERT(list_empty(&mdev->net_ee));
3061 D_ASSERT(list_empty(&mdev->resync_reads));
3062 D_ASSERT(list_empty(&mdev->data.work.q));
3063 D_ASSERT(list_empty(&mdev->meta.work.q));
3064 D_ASSERT(list_empty(&mdev->resync_work.list));
3065 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003066 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01003067
3068 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003069}
3070
3071
3072static void drbd_destroy_mempools(void)
3073{
3074 struct page *page;
3075
3076 while (drbd_pp_pool) {
3077 page = drbd_pp_pool;
3078 drbd_pp_pool = (struct page *)page_private(page);
3079 __free_page(page);
3080 drbd_pp_vacant--;
3081 }
3082
3083 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3084
3085 if (drbd_ee_mempool)
3086 mempool_destroy(drbd_ee_mempool);
3087 if (drbd_request_mempool)
3088 mempool_destroy(drbd_request_mempool);
3089 if (drbd_ee_cache)
3090 kmem_cache_destroy(drbd_ee_cache);
3091 if (drbd_request_cache)
3092 kmem_cache_destroy(drbd_request_cache);
3093 if (drbd_bm_ext_cache)
3094 kmem_cache_destroy(drbd_bm_ext_cache);
3095 if (drbd_al_ext_cache)
3096 kmem_cache_destroy(drbd_al_ext_cache);
3097
3098 drbd_ee_mempool = NULL;
3099 drbd_request_mempool = NULL;
3100 drbd_ee_cache = NULL;
3101 drbd_request_cache = NULL;
3102 drbd_bm_ext_cache = NULL;
3103 drbd_al_ext_cache = NULL;
3104
3105 return;
3106}
3107
3108static int drbd_create_mempools(void)
3109{
3110 struct page *page;
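	/* preallocate one maximum-sized bio's worth of pages per configured
	 * minor; the same count is used as the minimum reserve for the
	 * request and ee mempools below */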
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01003111 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003112 int i;
3113
3114 /* prepare our caches and mempools */
3115 drbd_request_mempool = NULL;
3116 drbd_ee_cache = NULL;
3117 drbd_request_cache = NULL;
3118 drbd_bm_ext_cache = NULL;
3119 drbd_al_ext_cache = NULL;
3120 drbd_pp_pool = NULL;
3121
3122 /* caches */
3123 drbd_request_cache = kmem_cache_create(
3124 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3125 if (drbd_request_cache == NULL)
3126 goto Enomem;
3127
3128 drbd_ee_cache = kmem_cache_create(
3129 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3130 if (drbd_ee_cache == NULL)
3131 goto Enomem;
3132
3133 drbd_bm_ext_cache = kmem_cache_create(
3134 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3135 if (drbd_bm_ext_cache == NULL)
3136 goto Enomem;
3137
3138 drbd_al_ext_cache = kmem_cache_create(
3139 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3140 if (drbd_al_ext_cache == NULL)
3141 goto Enomem;
3142
3143 /* mempools */
3144 drbd_request_mempool = mempool_create(number,
3145 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3146 if (drbd_request_mempool == NULL)
3147 goto Enomem;
3148
3149 drbd_ee_mempool = mempool_create(number,
3150 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06003151 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003152 goto Enomem;
3153
3154 /* drbd's page pool */
3155 spin_lock_init(&drbd_pp_lock);
3156
3157 for (i = 0; i < number; i++) {
3158 page = alloc_page(GFP_HIGHUSER);
3159 if (!page)
3160 goto Enomem;
3161 set_page_private(page, (unsigned long)drbd_pp_pool);
3162 drbd_pp_pool = page;
3163 }
3164 drbd_pp_vacant = number;
3165
3166 return 0;
3167
3168Enomem:
3169 drbd_destroy_mempools(); /* in case we allocated some */
3170 return -ENOMEM;
3171}
3172
3173static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3174 void *unused)
3175{
3176 /* just so we have it. you never know what interesting things we
3177 * might want to do here some day...
3178 */
3179
3180 return NOTIFY_DONE;
3181}
3182
3183static struct notifier_block drbd_notifier = {
3184 .notifier_call = drbd_notify_sys,
3185};
3186
3187static void drbd_release_ee_lists(struct drbd_conf *mdev)
3188{
3189 int rr;
3190
3191 rr = drbd_release_ee(mdev, &mdev->active_ee);
3192 if (rr)
3193 dev_err(DEV, "%d EEs in active list found!\n", rr);
3194
3195 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3196 if (rr)
3197 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3198
3199 rr = drbd_release_ee(mdev, &mdev->read_ee);
3200 if (rr)
3201 dev_err(DEV, "%d EEs in read list found!\n", rr);
3202
3203 rr = drbd_release_ee(mdev, &mdev->done_ee);
3204 if (rr)
3205 dev_err(DEV, "%d EEs in done list found!\n", rr);
3206
3207 rr = drbd_release_ee(mdev, &mdev->net_ee);
3208 if (rr)
3209 dev_err(DEV, "%d EEs in net list found!\n", rr);
3210}
3211
3212/* caution. no locking.
3213 * currently only used from module cleanup code. */
3214static void drbd_delete_device(unsigned int minor)
3215{
3216 struct drbd_conf *mdev = minor_to_mdev(minor);
3217
3218 if (!mdev)
3219 return;
3220
3221 /* paranoia asserts */
3222 if (mdev->open_cnt != 0)
3223 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3224 __FILE__ , __LINE__);
3225
3226 ERR_IF (!list_empty(&mdev->data.work.q)) {
3227 struct list_head *lp;
3228 list_for_each(lp, &mdev->data.work.q) {
3229 dev_err(DEV, "lp = %p\n", lp);
3230 }
3231 };
3232 /* end paranoia asserts */
3233
3234 del_gendisk(mdev->vdisk);
3235
3236 /* cleanup stuff that may have been allocated during
3237 * device (re-)configuration or state changes */
3238
3239 if (mdev->this_bdev)
3240 bdput(mdev->this_bdev);
3241
3242 drbd_free_resources(mdev);
3243
3244 drbd_release_ee_lists(mdev);
3245
3246 /* should be free'd on disconnect? */
3247 kfree(mdev->ee_hash);
3248 /*
3249 mdev->ee_hash_s = 0;
3250 mdev->ee_hash = NULL;
3251 */
3252
3253 lc_destroy(mdev->act_log);
3254 lc_destroy(mdev->resync);
3255
3256 kfree(mdev->p_uuid);
3257 /* mdev->p_uuid = NULL; */
3258
3259 kfree(mdev->int_dig_out);
3260 kfree(mdev->int_dig_in);
3261 kfree(mdev->int_dig_vv);
3262
3263 /* cleanup the rest that has been
3264 * allocated from drbd_new_device
3265 * and actually free the mdev itself */
3266 drbd_free_mdev(mdev);
3267}
3268
3269static void drbd_cleanup(void)
3270{
3271 unsigned int i;
3272
3273 unregister_reboot_notifier(&drbd_notifier);
3274
Lars Ellenberg17a93f32010-11-24 10:37:35 +01003275 /* first remove proc,
3276	 * drbdsetup uses its presence to detect
3277 * whether DRBD is loaded.
3278	 * If we were to get stuck in proc removal,
3279 * but have netlink already deregistered,
3280 * some drbdsetup commands may wait forever
3281 * for an answer.
3282 */
3283 if (drbd_proc)
3284 remove_proc_entry("drbd", NULL);
3285
Philipp Reisnerb411b362009-09-25 16:07:19 -07003286 drbd_nl_cleanup();
3287
3288 if (minor_table) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003289 i = minor_count;
3290 while (i--)
3291 drbd_delete_device(i);
3292 drbd_destroy_mempools();
3293 }
3294
3295 kfree(minor_table);
3296
3297 unregister_blkdev(DRBD_MAJOR, "drbd");
3298
3299 printk(KERN_INFO "drbd: module cleanup done.\n");
3300}
3301
3302/**
3303 * drbd_congested() - Callback for pdflush
3304 * @congested_data: User data
3305 * @bdi_bits: Bits pdflush is currently interested in
3306 *
3307 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3308 */
3309static int drbd_congested(void *congested_data, int bdi_bits)
3310{
3311 struct drbd_conf *mdev = congested_data;
3312 struct request_queue *q;
3313 char reason = '-';
3314 int r = 0;
3315
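	/* congestion_reason records a one-character hint about why we reported
	 * congestion: 'd' = IO frozen by DRBD, 'b' = local backing device
	 * congested, 'n' = network send path congested (NET_CONGESTED),
	 * 'a' = both backing device and network, '-' = not congested. */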
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01003316 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003317 /* DRBD has frozen IO */
3318 r = bdi_bits;
3319 reason = 'd';
3320 goto out;
3321 }
3322
3323 if (get_ldev(mdev)) {
3324 q = bdev_get_queue(mdev->ldev->backing_bdev);
3325 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3326 put_ldev(mdev);
3327 if (r)
3328 reason = 'b';
3329 }
3330
3331 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3332 r |= (1 << BDI_async_congested);
3333 reason = reason == 'b' ? 'a' : 'n';
3334 }
3335
3336out:
3337 mdev->congestion_reason = reason;
3338 return r;
3339}
3340
3341struct drbd_conf *drbd_new_device(unsigned int minor)
3342{
3343 struct drbd_conf *mdev;
3344 struct gendisk *disk;
3345 struct request_queue *q;
3346
3347 /* GFP_KERNEL, we are outside of all write-out paths */
3348 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3349 if (!mdev)
3350 return NULL;
3351 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3352 goto out_no_cpumask;
3353
3354 mdev->minor = minor;
3355
3356 drbd_init_set_defaults(mdev);
3357
3358 q = blk_alloc_queue(GFP_KERNEL);
3359 if (!q)
3360 goto out_no_q;
3361 mdev->rq_queue = q;
3362 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003363
3364 disk = alloc_disk(1);
3365 if (!disk)
3366 goto out_no_disk;
3367 mdev->vdisk = disk;
3368
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003369 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003370
3371 disk->queue = q;
3372 disk->major = DRBD_MAJOR;
3373 disk->first_minor = minor;
3374 disk->fops = &drbd_ops;
3375 sprintf(disk->disk_name, "drbd%d", minor);
3376 disk->private_data = mdev;
3377
3378 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3379 /* we have no partitions. we contain only ourselves. */
3380 mdev->this_bdev->bd_contains = mdev->this_bdev;
3381
3382 q->backing_dev_info.congested_fn = drbd_congested;
3383 q->backing_dev_info.congested_data = mdev;
3384
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01003385 blk_queue_make_request(q, drbd_make_request);
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01003386 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003387 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3388 blk_queue_merge_bvec(q, drbd_merge_bvec);
Jens Axboe7eaceac2011-03-10 08:52:07 +01003389 q->queue_lock = &mdev->req_lock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003390
3391 mdev->md_io_page = alloc_page(GFP_KERNEL);
3392 if (!mdev->md_io_page)
3393 goto out_no_io_page;
3394
3395 if (drbd_bm_init(mdev))
3396 goto out_no_bitmap;
3397 /* no need to lock access, we are still initializing this minor device. */
3398 if (!tl_init(mdev))
3399 goto out_no_tl;
3400
3401 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3402 if (!mdev->app_reads_hash)
3403 goto out_no_app_reads;
3404
3405 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3406 if (!mdev->current_epoch)
3407 goto out_no_epoch;
3408
3409 INIT_LIST_HEAD(&mdev->current_epoch->list);
3410 mdev->epochs = 1;
3411
3412 return mdev;
3413
3414/* out_whatever_else:
3415 kfree(mdev->current_epoch); */
3416out_no_epoch:
3417 kfree(mdev->app_reads_hash);
3418out_no_app_reads:
3419 tl_cleanup(mdev);
3420out_no_tl:
3421 drbd_bm_cleanup(mdev);
3422out_no_bitmap:
3423 __free_page(mdev->md_io_page);
3424out_no_io_page:
3425 put_disk(disk);
3426out_no_disk:
3427 blk_cleanup_queue(q);
3428out_no_q:
3429 free_cpumask_var(mdev->cpu_mask);
3430out_no_cpumask:
3431 kfree(mdev);
3432 return NULL;
3433}
3434
3435/* counterpart of drbd_new_device.
3436 * last part of drbd_delete_device. */
3437void drbd_free_mdev(struct drbd_conf *mdev)
3438{
3439 kfree(mdev->current_epoch);
3440 kfree(mdev->app_reads_hash);
3441 tl_cleanup(mdev);
3442 if (mdev->bitmap) /* should no longer be there. */
3443 drbd_bm_cleanup(mdev);
3444 __free_page(mdev->md_io_page);
3445 put_disk(mdev->vdisk);
3446 blk_cleanup_queue(mdev->rq_queue);
3447 free_cpumask_var(mdev->cpu_mask);
Philipp Reisner37190942010-11-10 12:08:37 +01003448 drbd_free_tl_hash(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003449 kfree(mdev);
3450}
3451
3452
3453int __init drbd_init(void)
3454{
3455 int err;
3456
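	/* The size and layout of p_handshake are part of the on-the-wire ABI
	 * shared with the peer, hence the hard-coded 80-byte check. */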
3457 if (sizeof(struct p_handshake) != 80) {
3458 printk(KERN_ERR
3459 "drbd: never change the size or layout "
3460 "of the HandShake packet.\n");
3461 return -EINVAL;
3462 }
3463
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01003464 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003465 printk(KERN_ERR
3466 "drbd: invalid minor_count (%d)\n", minor_count);
3467#ifdef MODULE
3468 return -EINVAL;
3469#else
3470 minor_count = 8;
3471#endif
3472 }
3473
3474 err = drbd_nl_init();
3475 if (err)
3476 return err;
3477
3478 err = register_blkdev(DRBD_MAJOR, "drbd");
3479 if (err) {
3480 printk(KERN_ERR
3481 "drbd: unable to register block device major %d\n",
3482 DRBD_MAJOR);
3483 return err;
3484 }
3485
3486 register_reboot_notifier(&drbd_notifier);
3487
3488 /*
3489 * allocate all necessary structs
3490 */
3491 err = -ENOMEM;
3492
3493 init_waitqueue_head(&drbd_pp_wait);
3494
3495 drbd_proc = NULL; /* play safe for drbd_cleanup */
3496 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3497 GFP_KERNEL);
3498 if (!minor_table)
3499 goto Enomem;
3500
3501 err = drbd_create_mempools();
3502 if (err)
3503 goto Enomem;
3504
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01003505 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003506 if (!drbd_proc) {
3507 printk(KERN_ERR "drbd: unable to register proc file\n");
3508 goto Enomem;
3509 }
3510
3511 rwlock_init(&global_state_lock);
3512
3513 printk(KERN_INFO "drbd: initialized. "
3514 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3515 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3516 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3517 printk(KERN_INFO "drbd: registered as block device major %d\n",
3518 DRBD_MAJOR);
3519 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3520
3521 return 0; /* Success! */
3522
3523Enomem:
3524 drbd_cleanup();
3525 if (err == -ENOMEM)
3526 /* currently always the case */
3527 printk(KERN_ERR "drbd: ran out of memory\n");
3528 else
3529 printk(KERN_ERR "drbd: initialization failure\n");
3530 return err;
3531}
3532
3533void drbd_free_bc(struct drbd_backing_dev *ldev)
3534{
3535 if (ldev == NULL)
3536 return;
3537
Tejun Heoe525fd82010-11-13 11:55:17 +01003538 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3539 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003540
3541 kfree(ldev);
3542}
3543
3544void drbd_free_sock(struct drbd_conf *mdev)
3545{
3546 if (mdev->data.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003547 mutex_lock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003548 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3549 sock_release(mdev->data.socket);
3550 mdev->data.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003551 mutex_unlock(&mdev->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003552 }
3553 if (mdev->meta.socket) {
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003554 mutex_lock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003555 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3556 sock_release(mdev->meta.socket);
3557 mdev->meta.socket = NULL;
Lars Ellenberg4589d7f2010-03-03 02:25:33 +01003558 mutex_unlock(&mdev->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003559 }
3560}
3561
3562
3563void drbd_free_resources(struct drbd_conf *mdev)
3564{
3565 crypto_free_hash(mdev->csums_tfm);
3566 mdev->csums_tfm = NULL;
3567 crypto_free_hash(mdev->verify_tfm);
3568 mdev->verify_tfm = NULL;
3569 crypto_free_hash(mdev->cram_hmac_tfm);
3570 mdev->cram_hmac_tfm = NULL;
3571 crypto_free_hash(mdev->integrity_w_tfm);
3572 mdev->integrity_w_tfm = NULL;
3573 crypto_free_hash(mdev->integrity_r_tfm);
3574 mdev->integrity_r_tfm = NULL;
3575
3576 drbd_free_sock(mdev);
3577
3578 __no_warn(local,
3579 drbd_free_bc(mdev->ldev);
3580 mdev->ldev = NULL;);
3581}
3582
3583/* meta data management */
3584
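/* On-disk layout of the DRBD meta data superblock.  All multi-byte fields
 * are stored big-endian; see the cpu_to_be*() / be*_to_cpu() conversions in
 * drbd_md_sync() and drbd_md_read(). */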
3585struct meta_data_on_disk {
3586 u64 la_size; /* last agreed size. */
3587 u64 uuid[UI_SIZE]; /* UUIDs. */
3588 u64 device_uuid;
3589 u64 reserved_u64_1;
3590 u32 flags; /* MDF */
3591 u32 magic;
3592 u32 md_size_sect;
3593 u32 al_offset; /* offset to this block */
3594 u32 al_nr_extents; /* important for restoring the AL */
3595 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3596 u32 bm_offset; /* offset to the bitmap, from here */
3597 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3598 u32 reserved_u32[4];
3599
3600} __packed;
3601
3602/**
3603 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3604 * @mdev: DRBD device.
3605 */
3606void drbd_md_sync(struct drbd_conf *mdev)
3607{
3608 struct meta_data_on_disk *buffer;
3609 sector_t sector;
3610 int i;
3611
Lars Ellenbergee15b032010-09-03 10:00:09 +02003612 del_timer(&mdev->md_sync_timer);
3613 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003614 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3615 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003616
3617	/* We use D_FAILED here, and not D_ATTACHING, because we try to write
3618	 * metadata even if we detach due to a disk failure! */
3619 if (!get_ldev_if_state(mdev, D_FAILED))
3620 return;
3621
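	/* md_io_mutex serializes users of the single preallocated md_io_page;
	 * the on-disk superblock occupies the first 512 bytes (one sector)
	 * of that page. */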
Philipp Reisnerb411b362009-09-25 16:07:19 -07003622 mutex_lock(&mdev->md_io_mutex);
3623 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3624 memset(buffer, 0, 512);
3625
3626 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3627 for (i = UI_CURRENT; i < UI_SIZE; i++)
3628 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3629 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3630 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3631
3632 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3633 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3634 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3635 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3636 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3637
3638 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3639
3640 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3641 sector = mdev->ldev->md.md_offset;
3642
Lars Ellenberg3f3a9b82010-09-01 15:12:12 +02003643 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003644 /* this was a try anyways ... */
3645 dev_err(DEV, "meta data update failed!\n");
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003646 drbd_chk_io_error(mdev, 1, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003647 }
3648
3649 /* Update mdev->ldev->md.la_size_sect,
3650 * since we updated it on metadata. */
3651 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3652
3653 mutex_unlock(&mdev->md_io_mutex);
3654 put_ldev(mdev);
3655}
3656
3657/**
3658 * drbd_md_read() - Reads in the meta data super block
3659 * @mdev: DRBD device.
3660 * @bdev: Device from which the meta data should be read in.
3661 *
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01003662 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
Philipp Reisnerb411b362009-09-25 16:07:19 -07003663 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3664 */
3665int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3666{
3667 struct meta_data_on_disk *buffer;
3668 int i, rv = NO_ERROR;
3669
3670 if (!get_ldev_if_state(mdev, D_ATTACHING))
3671 return ERR_IO_MD_DISK;
3672
Philipp Reisnerb411b362009-09-25 16:07:19 -07003673 mutex_lock(&mdev->md_io_mutex);
3674 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3675
3676 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3677	/* NOTE: can't do normal error processing here, as this is
3678	   called BEFORE the disk is attached */
3679 dev_err(DEV, "Error while reading metadata.\n");
3680 rv = ERR_IO_MD_DISK;
3681 goto err;
3682 }
3683
3684 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3685 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3686 rv = ERR_MD_INVALID;
3687 goto err;
3688 }
3689 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3690 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3691 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3692 rv = ERR_MD_INVALID;
3693 goto err;
3694 }
3695 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3696 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3697 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3698 rv = ERR_MD_INVALID;
3699 goto err;
3700 }
3701 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3702 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3703 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3704 rv = ERR_MD_INVALID;
3705 goto err;
3706 }
3707
3708 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3709 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3710 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3711 rv = ERR_MD_INVALID;
3712 goto err;
3713 }
3714
3715 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3716 for (i = UI_CURRENT; i < UI_SIZE; i++)
3717 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3718 bdev->md.flags = be32_to_cpu(buffer->flags);
3719 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3720 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3721
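	/* reject an implausibly small on-disk al_extents value and fall
	 * back to 127 activity log extents */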
3722 if (mdev->sync_conf.al_extents < 7)
3723 mdev->sync_conf.al_extents = 127;
3724
3725 err:
3726 mutex_unlock(&mdev->md_io_mutex);
3727 put_ldev(mdev);
3728
3729 return rv;
3730}
3731
Lars Ellenbergac724122010-10-07 15:18:08 +02003732static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
3733{
3734 static char *uuid_str[UI_EXTENDED_SIZE] = {
3735 [UI_CURRENT] = "CURRENT",
3736 [UI_BITMAP] = "BITMAP",
3737 [UI_HISTORY_START] = "HISTORY_START",
3738 [UI_HISTORY_END] = "HISTORY_END",
3739 [UI_SIZE] = "SIZE",
3740 [UI_FLAGS] = "FLAGS",
3741 };
3742
3743 if (index >= UI_EXTENDED_SIZE) {
3744 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
3745 return;
3746 }
3747
3748 dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
3749 uuid_str[index],
3750 (unsigned long long)mdev->ldev->md.uuid[index]);
3751}
3752
3753
Philipp Reisnerb411b362009-09-25 16:07:19 -07003754/**
3755 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3756 * @mdev: DRBD device.
3757 *
3758 * Call this function if you change anything that should be written to
3759 * the meta-data super block. This function sets MD_DIRTY, and starts a
3760 * timer that ensures that drbd_md_sync() gets called within five seconds.
3761 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003762#ifdef DEBUG
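/* With DEBUG, additionally record which caller last dirtied the meta data,
 * and re-arm the sync timer after HZ instead of 5*HZ. */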
Lars Ellenbergee15b032010-09-03 10:00:09 +02003763void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3764{
3765 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3766 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3767 mdev->last_md_mark_dirty.line = line;
3768 mdev->last_md_mark_dirty.func = func;
3769 }
3770}
3771#else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003772void drbd_md_mark_dirty(struct drbd_conf *mdev)
3773{
Lars Ellenbergee15b032010-09-03 10:00:09 +02003774 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003775 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003776}
Lars Ellenbergee15b032010-09-03 10:00:09 +02003777#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07003778
3779static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3780{
3781 int i;
3782
Lars Ellenbergac724122010-10-07 15:18:08 +02003783 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003784 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
Lars Ellenbergac724122010-10-07 15:18:08 +02003785 debug_drbd_uuid(mdev, i+1);
3786 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003787}
3788
3789void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3790{
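	/* The lowest bit of the current UUID mirrors our role:
	 * set while Primary, cleared while Secondary. */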
3791 if (idx == UI_CURRENT) {
3792 if (mdev->state.role == R_PRIMARY)
3793 val |= 1;
3794 else
3795 val &= ~((u64)1);
3796
3797 drbd_set_ed_uuid(mdev, val);
3798 }
3799
3800 mdev->ldev->md.uuid[idx] = val;
Lars Ellenbergac724122010-10-07 15:18:08 +02003801 debug_drbd_uuid(mdev, idx);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003802 drbd_md_mark_dirty(mdev);
3803}
3804
3805
3806void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3807{
3808 if (mdev->ldev->md.uuid[idx]) {
3809 drbd_uuid_move_history(mdev);
3810 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
Lars Ellenbergac724122010-10-07 15:18:08 +02003811 debug_drbd_uuid(mdev, UI_HISTORY_START);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003812 }
3813 _drbd_uuid_set(mdev, idx, val);
3814}
3815
3816/**
3817 * drbd_uuid_new_current() - Creates a new current UUID
3818 * @mdev: DRBD device.
3819 *
3820 * Creates a new current UUID, and rotates the old current UUID into
3821 * the bitmap slot. Causes an incremental resync upon next connect.
3822 */
3823void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3824{
3825 u64 val;
3826
3827 dev_info(DEV, "Creating new current UUID\n");
3828 D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3829 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
Lars Ellenbergac724122010-10-07 15:18:08 +02003830 debug_drbd_uuid(mdev, UI_BITMAP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003831
3832 get_random_bytes(&val, sizeof(u64));
3833 _drbd_uuid_set(mdev, UI_CURRENT, val);
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02003834 /* get it to stable storage _now_ */
3835 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003836}
3837
3838void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3839{
3840 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3841 return;
3842
3843 if (val == 0) {
3844 drbd_uuid_move_history(mdev);
3845 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3846 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Lars Ellenbergac724122010-10-07 15:18:08 +02003847 debug_drbd_uuid(mdev, UI_HISTORY_START);
3848 debug_drbd_uuid(mdev, UI_BITMAP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003849 } else {
3850 if (mdev->ldev->md.uuid[UI_BITMAP])
3851 dev_warn(DEV, "bm UUID already set");
3852
3853 mdev->ldev->md.uuid[UI_BITMAP] = val;
3854 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3855
Lars Ellenbergac724122010-10-07 15:18:08 +02003856 debug_drbd_uuid(mdev, UI_BITMAP);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003857 }
3858 drbd_md_mark_dirty(mdev);
3859}
3860
3861/**
3862 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3863 * @mdev: DRBD device.
3864 *
3865 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3866 */
3867int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3868{
3869 int rv = -EIO;
3870
3871 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3872 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3873 drbd_md_sync(mdev);
3874 drbd_bm_set_all(mdev);
3875
3876 rv = drbd_bm_write(mdev);
3877
3878 if (!rv) {
3879 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3880 drbd_md_sync(mdev);
3881 }
3882
3883 put_ldev(mdev);
3884 }
3885
3886 return rv;
3887}
3888
3889/**
3890 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3891 * @mdev: DRBD device.
3892 *
3893 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3894 */
3895int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3896{
3897 int rv = -EIO;
3898
Philipp Reisner07782862010-08-31 12:00:50 +02003899 drbd_resume_al(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003900 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3901 drbd_bm_clear_all(mdev);
3902 rv = drbd_bm_write(mdev);
3903 put_ldev(mdev);
3904 }
3905
3906 return rv;
3907}
3908
3909static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3910{
3911 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003912 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003913
3914 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3915
Lars Ellenberg02851e92010-12-16 14:47:39 +01003916 if (get_ldev(mdev)) {
3917 drbd_bm_lock(mdev, work->why);
3918 rv = work->io_fn(mdev);
3919 drbd_bm_unlock(mdev);
3920 put_ldev(mdev);
3921 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003922
3923 clear_bit(BITMAP_IO, &mdev->flags);
Philipp Reisner127b3172010-11-16 10:07:53 +01003924 smp_mb__after_clear_bit();
Philipp Reisnerb411b362009-09-25 16:07:19 -07003925 wake_up(&mdev->misc_wait);
3926
3927 if (work->done)
3928 work->done(mdev, rv);
3929
3930 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3931 work->why = NULL;
3932
3933 return 1;
3934}
3935
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003936void drbd_ldev_destroy(struct drbd_conf *mdev)
3937{
3938 lc_destroy(mdev->resync);
3939 mdev->resync = NULL;
3940 lc_destroy(mdev->act_log);
3941 mdev->act_log = NULL;
3942 __no_warn(local,
3943 drbd_free_bc(mdev->ldev);
3944 mdev->ldev = NULL;);
3945
3946 if (mdev->md_io_tmpp) {
3947 __free_page(mdev->md_io_tmpp);
3948 mdev->md_io_tmpp = NULL;
3949 }
3950 clear_bit(GO_DISKLESS, &mdev->flags);
3951}
3952
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003953static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3954{
3955 D_ASSERT(mdev->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02003956 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3957 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003958 * the protected members anymore, though, so once put_ldev reaches zero
3959 * again, it will be safe to free them. */
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003960 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003961 return 1;
3962}
3963
3964void drbd_go_diskless(struct drbd_conf *mdev)
3965{
3966 D_ASSERT(mdev->state.disk == D_FAILED);
3967 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
Lars Ellenberg9d282872010-10-14 13:57:07 +02003968 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003969}
3970
Philipp Reisnerb411b362009-09-25 16:07:19 -07003971/**
3972 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3973 * @mdev: DRBD device.
3974 * @io_fn: IO callback to be called when bitmap IO is possible
3975 * @done: callback to be called after the bitmap IO was performed
3976 * @why: Descriptive text of the reason for doing the IO
3977 *
3978 * While IO on the bitmap happens we freeze application IO, thus ensuring
3979 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3980 * called from worker context. It MUST NOT be used while a previous such
3981 * work is still pending!
3982 */
3983void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3984 int (*io_fn)(struct drbd_conf *),
3985 void (*done)(struct drbd_conf *, int),
3986 char *why)
3987{
3988 D_ASSERT(current == mdev->worker.task);
3989
3990 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3991 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3992 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3993 if (mdev->bm_io_work.why)
3994 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3995 why, mdev->bm_io_work.why);
3996
3997 mdev->bm_io_work.io_fn = io_fn;
3998 mdev->bm_io_work.done = done;
3999 mdev->bm_io_work.why = why;
4000
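	/* BITMAP_IO freezes out new application IO (presumably enforced in
	 * may_inc_ap_bio()).  The work is queued right away only while no
	 * application IO is in flight; otherwise it is presumably queued
	 * later, once ap_bio_cnt has drained to zero. */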
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004001 spin_lock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004002 set_bit(BITMAP_IO, &mdev->flags);
4003 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
Philipp Reisner127b3172010-11-16 10:07:53 +01004004 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004005 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004006 }
Philipp Reisner22afd7e2010-11-16 15:30:44 +01004007 spin_unlock_irq(&mdev->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004008}
4009
4010/**
4011 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4012 * @mdev: DRBD device.
4013 * @io_fn: IO callback to be called when bitmap IO is possible
4014 * @why: Descriptive text of the reason for doing the IO
4015 *
4016 * Freezes application IO while the actual IO operation runs. This
4017 * function MAY NOT be called from worker context.
4018 */
4019int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
4020{
4021 int rv;
4022
4023 D_ASSERT(current != mdev->worker.task);
4024
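	/* Unlike drbd_queue_bitmap_io(), this runs io_fn synchronously in the
	 * caller's context: suspend application IO, take the bitmap lock,
	 * run the callback, then resume. */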
4025 drbd_suspend_io(mdev);
4026
4027 drbd_bm_lock(mdev, why);
4028 rv = io_fn(mdev);
4029 drbd_bm_unlock(mdev);
4030
4031 drbd_resume_io(mdev);
4032
4033 return rv;
4034}
4035
4036void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4037{
4038 if ((mdev->ldev->md.flags & flag) != flag) {
4039 drbd_md_mark_dirty(mdev);
4040 mdev->ldev->md.flags |= flag;
4041 }
4042}
4043
4044void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4045{
4046 if ((mdev->ldev->md.flags & flag) != 0) {
4047 drbd_md_mark_dirty(mdev);
4048 mdev->ldev->md.flags &= ~flag;
4049 }
4050}
4051int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4052{
4053 return (bdev->md.flags & flag) != 0;
4054}
4055
4056static void md_sync_timer_fn(unsigned long data)
4057{
4058 struct drbd_conf *mdev = (struct drbd_conf *) data;
4059
4060 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4061}
4062
4063static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4064{
4065 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02004066#ifdef DEBUG
4067 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4068 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4069#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07004070 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004071 return 1;
4072}
4073
4074#ifdef CONFIG_DRBD_FAULT_INJECTION
4075/* Fault insertion support including random number generator shamelessly
4076 * stolen from kernel/rcutorture.c */
4077struct fault_random_state {
4078 unsigned long state;
4079 unsigned long count;
4080};
4081
4082#define FAULT_RANDOM_MULT 39916801 /* prime */
4083#define FAULT_RANDOM_ADD 479001701 /* prime */
4084#define FAULT_RANDOM_REFRESH 10000
4085
4086/*
4087 * Crude but fast random-number generator. Uses a linear congruential
4088 * generator, with occasional help from get_random_bytes().
4089 */
4090static unsigned long
4091_drbd_fault_random(struct fault_random_state *rsp)
4092{
4093 long refresh;
4094
Roel Kluin49829ea2009-12-15 22:55:44 +01004095 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004096 get_random_bytes(&refresh, sizeof(refresh));
4097 rsp->state += refresh;
4098 rsp->count = FAULT_RANDOM_REFRESH;
4099 }
4100 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4101 return swahw32(rsp->state);
4102}
4103
4104static char *
4105_drbd_fault_str(unsigned int type) {
4106 static char *_faults[] = {
4107 [DRBD_FAULT_MD_WR] = "Meta-data write",
4108 [DRBD_FAULT_MD_RD] = "Meta-data read",
4109 [DRBD_FAULT_RS_WR] = "Resync write",
4110 [DRBD_FAULT_RS_RD] = "Resync read",
4111 [DRBD_FAULT_DT_WR] = "Data write",
4112 [DRBD_FAULT_DT_RD] = "Data read",
4113 [DRBD_FAULT_DT_RA] = "Data read ahead",
4114 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02004115 [DRBD_FAULT_AL_EE] = "EE allocation",
4116 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07004117 };
4118
4119 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4120}
4121
4122unsigned int
4123_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4124{
4125 static struct fault_random_state rrs = {0, 0};
4126
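	/* Fire if this minor is selected by the fault_devs bitmask (0 means
	 * "all devices") and a 1..100 roll falls within fault_rate percent. */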
4127 unsigned int ret = (
4128 (fault_devs == 0 ||
4129 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4130 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4131
4132 if (ret) {
4133 fault_count++;
4134
Lars Ellenberg73835062010-05-27 11:51:56 +02004135 if (__ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004136 dev_warn(DEV, "***Simulating %s failure\n",
4137 _drbd_fault_str(type));
4138 }
4139
4140 return ret;
4141}
4142#endif
4143
4144const char *drbd_buildtag(void)
4145{
4146	/* DRBD built from external sources carries a reference to the
4147	   git hash of the source code here. */
4148
4149 static char buildtag[38] = "\0uilt-in";
4150
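	/* buildtag[] starts with a NUL byte: on first use it is either filled
	 * with the module srcversion, or the leading 'b' is patched in so the
	 * string reads "built-in". */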
4151 if (buildtag[0] == 0) {
4152#ifdef CONFIG_MODULES
4153 if (THIS_MODULE != NULL)
4154 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4155 else
4156#endif
4157 buildtag[0] = 'b';
4158 }
4159
4160 return buildtag;
4161}
4162
4163module_init(drbd_init)
4164module_exit(drbd_cleanup)
4165
Philipp Reisnerb411b362009-09-25 16:07:19 -07004166EXPORT_SYMBOL(drbd_conn_str);
4167EXPORT_SYMBOL(drbd_role_str);
4168EXPORT_SYMBOL(drbd_disk_str);
4169EXPORT_SYMBOL(drbd_set_st_err_str);