/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
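
/*
 * Illustrative sketch (not part of the original file, not compiled):
 * how such a chain is built and walked with page->private acting as
 * the "next" pointer.  Assumes the page_chain_next() helper used by
 * the functions below, and that the caller owns the pages.
 */
#if 0
static void page_chain_example(void)
{
	struct page *head = NULL, *p;
	int i;

	/* push three pages onto a private chain, LIFO */
	for (i = 0; i < 3; i++) {
		p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		set_page_private(p, (unsigned long)head);	/* p->"next" = head */
		head = p;
	}

	/* walk the chain; page_chain_next(p) reads page_private(p) */
	for (p = head; p; p = page_chain_next(p)) {
		/* ... use p ... */
	}
}
#endif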

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not
	   finished, we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyway. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
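
/*
 * Minimal usage sketch (illustrative only, not compiled): a receive
 * path allocates a chain big enough for its payload and hands it back
 * through drbd_pp_free() below when done.  The size and the retry flag
 * here are assumptions for the example.
 */
#if 0
{
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct page *chain = drbd_pp_alloc(mdev, nr_pages, true /* retry */);

	if (chain) {
		/* fill the pages via kmap()/kunmap(), as read_in_block() does */
		drbd_pp_free(mdev, chain, 0 /* not accounted to pp_in_use_by_net */);
	}
}
#endif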

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/
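
/* For example, drbd_wait_ee_list_empty() further down follows exactly
 * this rule: it takes the req_lock itself and then calls the locked
 * variant _drbd_wait_ee_list_empty(). */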

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* See also kernel_accept(), which is only present since 2.6.18.
 * Also, we want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so Linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
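
/*
 * Sketch of how a caller might dispatch on the return values documented
 * above drbd_connect() (hypothetical; the actual retry loop lives with
 * the receiver thread, not here):
 *	 1  connected, start processing packets
 *	 0  transient failure, try again
 *	-1  handshake/authentication failed, go StandAlone
 *	-2  no network configuration
 */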

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}
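
/*
 * Summary of the state machine above (informational): an epoch is
 * finished only once all three conditions hold -- it contains at least
 * one write (epoch_size != 0), no write in it is still in flight
 * (active == 0), and its P_BARRIER has been seen
 * (DE_HAVE_BARRIER_NUMBER); only then is P_BARRIER_ACK sent back.
 */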

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee() - Submit the data of an epoch entry to local disk I/O
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}
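
/*
 * Worked example for the splitting above (illustrative numbers): a
 * 32 KiB epoch entry is 8 pages with PAGE_SIZE == 4096.  If the local
 * queue limits cap a bio at 16 KiB, bio_add_page() accepts 4 pages and
 * refuses the 5th, which takes the "goto next_bio" path, so the entry
 * ends up submitted as two bios with e->pending_bios == 2.
 */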

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return true;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return true;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return false;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return true;
}

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (!data_size)
		return true;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt. if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}

/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return false;
}

static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return false;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}

/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok = 1;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
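/* A minimal sketch of the wrap-safe comparison assumed below (the real
 * seq_le() lives in drbd's headers; this shows how such helpers are usually
 * written, not necessarily the exact drbd definition):
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 *
 * The signed difference keeps the ordering correct across the 32bit wrap,
 * as long as the two sequence numbers are less than 2^31 apart. */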
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;
	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
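/* Example: a peer write sent with DP_RW_SYNC|DP_FUA is submitted locally
 * with REQ_SYNC|REQ_FUA; bio_flags_to_wire() performs the inverse mapping
 * on the sending side. */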

/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *    queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *    block the receiver, waiting on misc_wait
		 *    until no more conflicting requests are there,
		 *    or we get interrupted (disconnect).
		 *
		 *    we do not just write after local io completion of those
		 *    requests, but only after req is done completely, i.e.
		 *    we wait for the P_DISCARD_ACK to arrive!
		 *
		 *    then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return true;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

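	/* ack policy by wire protocol: A sends no ack at all, B acks mere
	 * receipt with P_RECV_ACK right here, C acks only after local write
	 * completion (P_WRITE_ACK, sent from e_end_block() via the asender). */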
	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	/* yes, the epoch_size now is imbalanced.
	 * but we drop the connection anyways, so we don't have a chance to
	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
 * activity (more than 64 sectors) that we cannot account for with our own
 * resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);
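		/* db is in bitmap bits (4 KiB of resynced data each), dt in
		 * seconds, so dbdt is the recent resync rate in KiB/s: e.g.
		 * 3000 bits cleared within 6 seconds gives Bit2KB(500), i.e.
		 * 2000 KiB/s, directly comparable to c_min_rate below. */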

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}

static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return false;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return false;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return true;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return false;
}

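/* After-split-brain auto recovery with zero primaries.
 * Return convention, presumably mirroring drbd_uuid_compare()/hg:
 *  1 => discard the peer's modifications (we become sync source),
 * -1 => discard our own modifications (we become sync target),
 * -100 => no automatic decision possible. */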
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}

static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}

static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}

/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the peer UUID
			   modifications done by the last start of resync as sync source. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Did not get last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the local UUID
			   modifications done by the last start of resync as sync source. */

			if (mdev->agreed_pro_version < 91)
				return -1091;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}


	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}

/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
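/* hg, as computed by drbd_uuid_compare() above: the sign picks the sync
 * direction (> 0 sync source, < 0 sync target), |hg| == 2 forces a full
 * sync, |hg| == 100 flags a split brain. */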
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}

static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose	= cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return false;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return true;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
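
/* Typical use, as in receive_SyncParam() below:
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, p->verify_alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;
 */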

static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	int ok = true;
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);
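	/* layout by protocol version: apv 88 appends the NUL terminated
	 * verify-alg name, apv 89..94 use p_rs_param_89 with fixed size alg
	 * name fields, apv >= 95 additionally carries the dynamic resync
	 * controller settings (the c_* fields read below). */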

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return false;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return false;

	mdev->sync_conf.rate = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u byte\n",
						data_size, SHARED_SECRET_MAX);
				return false;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return false;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

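			/* presumably one fifo slot per resync controller tick
			 * (SLEEP_TIME): c_plan_ahead is configured in units of
			 * 0.1 seconds, so the fifo covers the whole plan-ahead
			 * window at one entry per tick. */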
			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kzalloc of fifo_buffer failed\n");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size   = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;
disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}

static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ */
}

/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}

static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	unsigned int max_bio_size;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
			     (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determin_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return false;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		if (mdev->agreed_pro_version < 94)
			max_bio_size = be32_to_cpu(p->max_bio_size);
		else if (mdev->agreed_pro_version == 94)
			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			max_bio_size = DRBD_MAX_BIO_SIZE;

		if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
			drbd_setup_queue_param(mdev, max_bio_size);

		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return true;
}
3022
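/*
 * Illustration only, not driver code: the max_bio_size negotiation used
 * in receive_sizes() above, restated as a standalone userspace sketch.
 * The numeric constants are stand-ins for the real DRBD_MAX_SIZE_H80_PACKET
 * and DRBD_MAX_BIO_SIZE values; only the version ladder itself is taken
 * from the code above. Compile separately, not as part of this file.
 */
#if 0
#include <assert.h>

enum { H80_MAX = 32768, NEW_MAX = 131072 }; /* illustrative sizes, bytes */

static unsigned int negotiated_max_bio_size(int agreed_pro_version,
					    unsigned int peer_value)
{
	if (agreed_pro_version < 94)
		return peer_value;	/* old peers tell us explicitly */
	else if (agreed_pro_version == 94)
		return H80_MAX;		/* fixed limit of the h80 packet */
	else
		return NEW_MAX;		/* drbd 8.3.8 onwards */
}

int main(void)
{
	assert(negotiated_max_bio_size(91, 4096) == 4096);
	assert(negotiated_max_bio_size(94, 4096) == H80_MAX);
	assert(negotiated_max_bio_size(95, 4096) == NEW_MAX);
	return 0;
}
#endif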
Philipp Reisner02918be2010-08-20 14:35:10 +02003023static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003024{
Philipp Reisner02918be2010-08-20 14:35:10 +02003025 struct p_uuids *p = &mdev->data.rbuf.uuids;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003026 u64 *p_uuid;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003027 int i, updated_uuids = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003028
Philipp Reisnerb411b362009-09-25 16:07:19 -07003029	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
	if (!p_uuid) {
		dev_err(DEV, "kmalloc of p_uuid failed\n");
		return false;
	}
3030
3031 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3032 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3033
3034 kfree(mdev->p_uuid);
3035 mdev->p_uuid = p_uuid;
3036
3037 if (mdev->state.conn < C_CONNECTED &&
3038 mdev->state.disk < D_INCONSISTENT &&
3039 mdev->state.role == R_PRIMARY &&
3040 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3041 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3042 (unsigned long long)mdev->ed_uuid);
3043 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003044 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003045 }
3046
3047 if (get_ldev(mdev)) {
3048 int skip_initial_sync =
3049 mdev->state.conn == C_CONNECTED &&
3050 mdev->agreed_pro_version >= 90 &&
3051 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3052 (p_uuid[UI_FLAGS] & 8);
3053 if (skip_initial_sync) {
3054 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3055 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3056 "clear_n_write from receive_uuids");
3057 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3058 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3059 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3060 CS_VERBOSE, NULL);
3061 drbd_md_sync(mdev);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003062 updated_uuids = 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003063 }
3064 put_ldev(mdev);
Philipp Reisner18a50fa2010-06-21 14:14:15 +02003065 } else if (mdev->state.disk < D_INCONSISTENT &&
3066 mdev->state.role == R_PRIMARY) {
3067 /* I am a diskless primary, the peer just created a new current UUID
3068 for me. */
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003069 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003070 }
3071
3072	/* Before we test for the disk state, we should wait until a possibly
3073	   ongoing cluster-wide state change has finished. That is important if
3074 we are primary and are detaching from our disk. We need to see the
3075 new disk state... */
3076 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3077 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003078 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3079
3080 if (updated_uuids)
3081 drbd_print_uuids(mdev, "receiver updated UUIDs to");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003082
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003083 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003084}
3085
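/*
 * Illustration only: the UUID identity comparison with the low-order bit
 * masked out, as done in receive_uuids() above. The meaning of the bit-0
 * flag is not restated here; this standalone sketch only shows the
 * masking itself.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int same_data_generation(uint64_t a, uint64_t b)
{
	return (a & ~UINT64_C(1)) == (b & ~UINT64_C(1));
}

int main(void)
{
	/* UUIDs differing only in bit 0 denote the same data generation */
	assert(same_data_generation(0x1234567800000000ULL,
				    0x1234567800000001ULL));
	assert(!same_data_generation(0x1234567800000000ULL,
				     0xcafebabe00000000ULL));
	return 0;
}
#endif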
3086/**
3087 * convert_state() - Converts the peer's view of the cluster state to our point of view
3088 * @ps: The state as seen by the peer.
3089 */
3090static union drbd_state convert_state(union drbd_state ps)
3091{
3092 union drbd_state ms;
3093
3094 static enum drbd_conns c_tab[] = {
3095 [C_CONNECTED] = C_CONNECTED,
3096
3097 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3098 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3099 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3100 [C_VERIFY_S] = C_VERIFY_T,
3101 [C_MASK] = C_MASK,
3102 };
3103
3104 ms.i = ps.i;
3105
3106 ms.conn = c_tab[ps.conn];
3107 ms.peer = ps.role;
3108 ms.role = ps.peer;
3109 ms.pdsk = ps.disk;
3110 ms.disk = ps.pdsk;
3111 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3112
3113 return ms;
3114}
3115
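/*
 * Illustration only: the mirroring performed by convert_state() above,
 * shown on a simplified state struct (a sketch, not the real union
 * drbd_state). Local and peer fields simply swap places.
 */
#if 0
#include <assert.h>

struct mini_state { int role, peer, disk, pdsk; };

static struct mini_state mirror(struct mini_state ps)
{
	struct mini_state ms = ps;

	ms.role = ps.peer;	/* the peer's peer is me */
	ms.peer = ps.role;
	ms.disk = ps.pdsk;	/* the peer's view of my disk is my disk */
	ms.pdsk = ps.disk;
	return ms;
}

int main(void)
{
	struct mini_state ps = { .role = 1, .peer = 2, .disk = 8, .pdsk = 4 };
	struct mini_state ms = mirror(ps);

	assert(ms.role == 2 && ms.peer == 1);
	assert(ms.disk == 4 && ms.pdsk == 8);
	return 0;
}
#endif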
Philipp Reisner02918be2010-08-20 14:35:10 +02003116static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003117{
Philipp Reisner02918be2010-08-20 14:35:10 +02003118 struct p_req_state *p = &mdev->data.rbuf.req_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003119 union drbd_state mask, val;
Andreas Gruenbacherbf885f82010-12-08 00:39:32 +01003120 enum drbd_state_rv rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003121
Philipp Reisnerb411b362009-09-25 16:07:19 -07003122 mask.i = be32_to_cpu(p->mask);
3123 val.i = be32_to_cpu(p->val);
3124
3125 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3126 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3127 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003128 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003129 }
3130
3131 mask = convert_state(mask);
3132 val = convert_state(val);
3133
3134 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3135
3136 drbd_send_sr_reply(mdev, rv);
3137 drbd_md_sync(mdev);
3138
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003139 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003140}
3141
Philipp Reisner02918be2010-08-20 14:35:10 +02003142static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003143{
Philipp Reisner02918be2010-08-20 14:35:10 +02003144 struct p_state *p = &mdev->data.rbuf.state;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003145 union drbd_state os, ns, peer_state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003146 enum drbd_disk_state real_peer_disk;
Philipp Reisner65d922c2010-06-16 16:18:09 +02003147 enum chg_state_flags cs_flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003148 int rv;
3149
Philipp Reisnerb411b362009-09-25 16:07:19 -07003150 peer_state.i = be32_to_cpu(p->state);
3151
3152 real_peer_disk = peer_state.disk;
3153 if (peer_state.disk == D_NEGOTIATING) {
3154 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3155 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3156 }
3157
3158 spin_lock_irq(&mdev->req_lock);
3159 retry:
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003160 os = ns = mdev->state;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003161 spin_unlock_irq(&mdev->req_lock);
3162
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003163 /* peer says his disk is uptodate, while we think it is inconsistent,
3164 * and this happens while we think we have a sync going on. */
3165 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3166 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3167 /* If we are (becoming) SyncSource, but peer is still in sync
3168 * preparation, ignore its uptodate-ness to avoid flapping, it
3169 * will change to inconsistent once the peer reaches active
3170 * syncing states.
3171 * It may have changed syncer-paused flags, however, so we
3172 * cannot ignore this completely. */
3173 if (peer_state.conn > C_CONNECTED &&
3174 peer_state.conn < C_SYNC_SOURCE)
3175 real_peer_disk = D_INCONSISTENT;
3176
3177 /* if peer_state changes to connected at the same time,
3178 * it explicitly notifies us that it finished resync.
3179 * Maybe we should finish it up, too? */
3180 else if (os.conn >= C_SYNC_SOURCE &&
3181 peer_state.conn == C_CONNECTED) {
3182 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3183 drbd_resync_finished(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003184 return true;
Lars Ellenberge9ef7bb2010-10-07 15:55:39 +02003185 }
3186 }
3187
3188 /* peer says his disk is inconsistent, while we think it is uptodate,
3189 * and this happens while the peer still thinks we have a sync going on,
3190 * but we think we are already done with the sync.
3191 * We ignore this to avoid flapping pdsk.
3192 * This should not happen, if the peer is a recent version of drbd. */
3193 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3194 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3195 real_peer_disk = D_UP_TO_DATE;
3196
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003197 if (ns.conn == C_WF_REPORT_PARAMS)
3198 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003199
Philipp Reisner67531712010-10-27 12:21:30 +02003200 if (peer_state.conn == C_AHEAD)
3201 ns.conn = C_BEHIND;
3202
Philipp Reisnerb411b362009-09-25 16:07:19 -07003203 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3204 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3205 int cr; /* consider resync */
3206
3207 /* if we established a new connection */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003208 cr = (os.conn < C_CONNECTED);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003209 /* if we had an established connection
3210 * and one of the nodes newly attaches a disk */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003211 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003212 (peer_state.disk == D_NEGOTIATING ||
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003213 os.disk == D_NEGOTIATING));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003214 /* if we have both been inconsistent, and the peer has been
3215 * forced to be UpToDate with --overwrite-data */
3216 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3217 /* if we had been plain connected, and the admin requested to
3218 * start a sync by "invalidate" or "invalidate-remote" */
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003219 cr |= (os.conn == C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003220 (peer_state.conn >= C_STARTING_SYNC_S &&
3221 peer_state.conn <= C_WF_BITMAP_T));
3222
3223 if (cr)
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003224 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003225
3226 put_ldev(mdev);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003227 if (ns.conn == C_MASK) {
3228 ns.conn = C_CONNECTED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003229 if (mdev->state.disk == D_NEGOTIATING) {
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003230 drbd_force_state(mdev, NS(disk, D_FAILED));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003231 } else if (peer_state.disk == D_NEGOTIATING) {
3232 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3233 peer_state.disk = D_DISKLESS;
Lars Ellenberg580b9762010-02-26 23:15:23 +01003234 real_peer_disk = D_DISKLESS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003235 } else {
Philipp Reisnercf14c2e2010-02-02 21:03:50 +01003236 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003237 return false;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003238 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003239 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003240 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003241 }
3242 }
3243 }
3244
3245 spin_lock_irq(&mdev->req_lock);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003246 if (mdev->state.i != os.i)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003247 goto retry;
3248 clear_bit(CONSIDER_RESYNC, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003249 ns.peer = peer_state.role;
3250 ns.pdsk = real_peer_disk;
3251 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003252 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003253 ns.disk = mdev->new_state_tmp.disk;
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003254 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3255 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
Philipp Reisner481c6f52010-06-22 14:03:27 +02003256 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3257 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3258	   for temporary network outages! */
3259 spin_unlock_irq(&mdev->req_lock);
3260 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3261 tl_clear(mdev);
3262 drbd_uuid_new_current(mdev);
3263 clear_bit(NEW_CUR_UUID, &mdev->flags);
3264 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003265 return false;
Philipp Reisner481c6f52010-06-22 14:03:27 +02003266 }
Philipp Reisner65d922c2010-06-16 16:18:09 +02003267 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003268 ns = mdev->state;
3269 spin_unlock_irq(&mdev->req_lock);
3270
3271 if (rv < SS_SUCCESS) {
3272 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003273 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003274 }
3275
Lars Ellenberg4ac4aad2010-07-22 17:39:26 +02003276 if (os.conn > C_WF_REPORT_PARAMS) {
3277 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278 peer_state.disk != D_NEGOTIATING ) {
3279 /* we want resync, peer has not yet decided to sync... */
3280 /* Nowadays only used when forcing a node into primary role and
3281 setting its disk to UpToDate with that */
3282 drbd_send_uuids(mdev);
3283 drbd_send_state(mdev);
3284 }
3285 }
3286
3287 mdev->net_conf->want_lose = 0;
3288
3289 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3290
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003291 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003292}
3293
Philipp Reisner02918be2010-08-20 14:35:10 +02003294static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003295{
Philipp Reisner02918be2010-08-20 14:35:10 +02003296 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003297
3298 wait_event(mdev->misc_wait,
3299 mdev->state.conn == C_WF_SYNC_UUID ||
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02003300 mdev->state.conn == C_BEHIND ||
Philipp Reisnerb411b362009-09-25 16:07:19 -07003301 mdev->state.conn < C_CONNECTED ||
3302 mdev->state.disk < D_NEGOTIATING);
3303
3304 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3305
Philipp Reisnerb411b362009-09-25 16:07:19 -07003306 /* Here the _drbd_uuid_ functions are right, current should
3307 _not_ be rotated into the history */
3308 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3309 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3310 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3311
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003312 drbd_print_uuids(mdev, "updated sync uuid");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003313 drbd_start_resync(mdev, C_SYNC_TARGET);
3314
3315 put_ldev(mdev);
3316 } else
3317 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3318
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003319 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003320}
3321
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003322/**
3323 * receive_bitmap_plain
3324 *
3325 * Return 0 when done, 1 when another iteration is needed, and a negative error
3326 * code upon failure.
3327 */
3328static int
Philipp Reisner02918be2010-08-20 14:35:10 +02003329receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3330 unsigned long *buffer, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003331{
3332 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3333 unsigned want = num_words * sizeof(long);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003334 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003335
Philipp Reisner02918be2010-08-20 14:35:10 +02003336 if (want != data_size) {
3337 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003338 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003339 }
3340 if (want == 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003341 return 0;
3342 err = drbd_recv(mdev, buffer, want);
3343 if (err != want) {
3344 if (err >= 0)
3345 err = -EIO;
3346 return err;
3347 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003348
3349 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3350
3351 c->word_offset += num_words;
3352 c->bit_offset = c->word_offset * BITS_PER_LONG;
3353 if (c->bit_offset > c->bm_bits)
3354 c->bit_offset = c->bm_bits;
3355
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003356 return 1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003357}
3358
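/*
 * Illustration only: the offset bookkeeping of receive_bitmap_plain()
 * above, with a made-up packet size standing in for BM_PACKET_WORDS.
 * Each plain-bitmap packet carries at most PACKET_WORDS longs; the word
 * offset advances per packet, and the bit offset is derived from it,
 * clamped to the bitmap size because the last word may be partial.
 */
#if 0
#include <assert.h>
#include <stddef.h>

#define PACKET_WORDS 128	/* stand-in for BM_PACKET_WORDS */
#define BITS_PER_LONG_ (8 * (int)sizeof(long))

struct xfer { size_t bm_words, bm_bits, word_offset, bit_offset; };

/* returns the number of words expected in the next packet */
static size_t next_chunk(struct xfer *c)
{
	size_t num_words = c->bm_words - c->word_offset;

	if (num_words > PACKET_WORDS)
		num_words = PACKET_WORDS;
	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG_;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;	/* last word is partial */
	return num_words;
}

int main(void)
{
	struct xfer c = { .bm_words = 300,
			  .bm_bits = 300 * BITS_PER_LONG_ - 7 };

	assert(next_chunk(&c) == 128);
	assert(next_chunk(&c) == 128);
	assert(next_chunk(&c) == 44);	/* tail */
	assert(c.bit_offset == c.bm_bits);
	return 0;
}
#endif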
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003359/**
3360 * recv_bm_rle_bits
3361 *
3362 * Return 0 when done, 1 when another iteration is needed, and a negative error
3363 * code upon failure.
3364 */
3365static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003366recv_bm_rle_bits(struct drbd_conf *mdev,
3367 struct p_compressed_bm *p,
3368 struct bm_xfer_ctx *c)
3369{
3370 struct bitstream bs;
3371 u64 look_ahead;
3372 u64 rl;
3373 u64 tmp;
3374 unsigned long s = c->bit_offset;
3375 unsigned long e;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003376 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003377 int toggle = DCBP_get_start(p);
3378 int have;
3379 int bits;
3380
3381 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3382
3383 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3384 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003385 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003386
3387 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3388 bits = vli_decode_bits(&rl, look_ahead);
3389 if (bits <= 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003390 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003391
3392 if (toggle) {
3393 e = s + rl -1;
3394 if (e >= c->bm_bits) {
3395 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003396 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003397 }
3398 _drbd_bm_set_bits(mdev, s, e);
3399 }
3400
3401 if (have < bits) {
3402 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3403 have, bits, look_ahead,
3404 (unsigned int)(bs.cur.b - p->code),
3405 (unsigned int)bs.buf_len);
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003406 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003407 }
3408 look_ahead >>= bits;
3409 have -= bits;
3410
3411 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3412 if (bits < 0)
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003413 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003414 look_ahead |= tmp << have;
3415 have += bits;
3416 }
3417
3418 c->bit_offset = s;
3419 bm_xfer_ctx_bit_to_word_offset(c);
3420
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003421 return (s != c->bm_bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003422}
3423
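/*
 * Illustration only: the run-length idea behind recv_bm_rle_bits()
 * above, using plain integer run lengths instead of DRBD's VLI
 * bitstream. Runs alternate between clear and set bits; "toggle" says
 * whether the first run is a set run. A standalone sketch, compiled
 * separately.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static void set_bits(uint8_t *bm, unsigned s, unsigned e)
{
	for (; s <= e; s++)
		bm[s / 8] |= 1u << (s % 8);
}

static unsigned decode_runs(uint8_t *bm, const unsigned *rl, int n, int toggle)
{
	unsigned s = 0;

	for (int i = 0; i < n; i++, toggle = !toggle) {
		if (toggle)
			set_bits(bm, s, s + rl[i] - 1);
		s += rl[i];
	}
	return s;	/* total number of bits decoded */
}

int main(void)
{
	uint8_t bm[4] = { 0 };
	/* 5 clear, 3 set, 8 clear, 2 set */
	const unsigned rl[] = { 5, 3, 8, 2 };

	assert(decode_runs(bm, rl, 4, 0) == 18);
	assert(bm[0] == 0xe0);	/* bits 5..7 set */
	assert(bm[2] == 0x03);	/* bits 16..17 set */
	return 0;
}
#endif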
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003424/**
3425 * decode_bitmap_c
3426 *
3427 * Return 0 when done, 1 when another iteration is needed, and a negative error
3428 * code upon failure.
3429 */
3430static int
Philipp Reisnerb411b362009-09-25 16:07:19 -07003431decode_bitmap_c(struct drbd_conf *mdev,
3432 struct p_compressed_bm *p,
3433 struct bm_xfer_ctx *c)
3434{
3435 if (DCBP_get_code(p) == RLE_VLI_Bits)
3436 return recv_bm_rle_bits(mdev, p, c);
3437
3438 /* other variants had been implemented for evaluation,
3439 * but have been dropped as this one turned out to be "best"
3440 * during all our tests. */
3441
3442 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3443 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003444 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003445}
3446
3447void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3448 const char *direction, struct bm_xfer_ctx *c)
3449{
3450 /* what would it take to transfer it "plaintext" */
Philipp Reisner0b70a132010-08-20 13:36:10 +02003451 unsigned plain = sizeof(struct p_header80) *
Philipp Reisnerb411b362009-09-25 16:07:19 -07003452 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3453 + c->bm_words * sizeof(long);
3454 unsigned total = c->bytes[0] + c->bytes[1];
3455 unsigned r;
3456
3457	/* total cannot be zero, but just in case: */
3458 if (total == 0)
3459 return;
3460
3461 /* don't report if not compressed */
3462 if (total >= plain)
3463 return;
3464
3465 /* total < plain. check for overflow, still */
3466 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3467 : (1000 * total / plain);
3468
3469 if (r > 1000)
3470 r = 1000;
3471
3472 r = 1000 - r;
3473 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3474 "total %u; compression: %u.%u%%\n",
3475 direction,
3476 c->bytes[1], c->packets[1],
3477 c->bytes[0], c->packets[0],
3478 total, r/10, r % 10);
3479}
3480
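/*
 * Illustration only: the per-mille compression figure computed by
 * INFO_bm_xfer_stats() above. For huge byte counts, 1000 * total would
 * overflow an unsigned int, so the division is rearranged in that case.
 */
#if 0
#include <assert.h>
#include <limits.h>

static unsigned compression_permille(unsigned total, unsigned plain)
{
	unsigned r;

	if (total >= plain)
		return 0;	/* not compressed, nothing to report */
	r = (total > UINT_MAX / 1000) ? (total / (plain / 1000))
				      : (1000 * total / plain);
	if (r > 1000)
		r = 1000;
	return 1000 - r;	/* saved fraction, in 0.1% units */
}

int main(void)
{
	assert(compression_permille(250, 1000) == 750);	/* 75.0% saved */
	assert(compression_permille(999, 1000) == 1);	/* 0.1% saved */
	return 0;
}
#endif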
3481/* Since we are processing the bitfield from lower addresses to higher,
3482   it does not matter whether we process it in 32 bit or 64 bit
3483   chunks, as long as it is little endian. (Understand it as a byte stream,
3484   beginning with the lowest byte...) If we used big endian
3485   we would need to process it from the highest address to the lowest,
3486 in order to be agnostic to the 32 vs 64 bits issue.
3487
3488 returns 0 on failure, 1 if we successfully received it. */
Philipp Reisner02918be2010-08-20 14:35:10 +02003489static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003490{
3491 struct bm_xfer_ctx c;
3492 void *buffer;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003493 int err;
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003494 int ok = false;
Philipp Reisner02918be2010-08-20 14:35:10 +02003495 struct p_header80 *h = &mdev->data.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003496
Philipp Reisner37190942010-11-10 12:08:37 +01003497 /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003498
3499 /* maybe we should use some per thread scratch page,
3500 * and allocate that during initial device creation? */
3501 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3502 if (!buffer) {
3503 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3504 goto out;
3505 }
3506
3507 c = (struct bm_xfer_ctx) {
3508 .bm_bits = drbd_bm_bits(mdev),
3509 .bm_words = drbd_bm_words(mdev),
3510 };
3511
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003512 for(;;) {
Philipp Reisner02918be2010-08-20 14:35:10 +02003513 if (cmd == P_BITMAP) {
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003514 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
Philipp Reisner02918be2010-08-20 14:35:10 +02003515 } else if (cmd == P_COMPRESSED_BITMAP) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003516 /* MAYBE: sanity check that we speak proto >= 90,
3517 * and the feature is enabled! */
3518 struct p_compressed_bm *p;
3519
Philipp Reisner02918be2010-08-20 14:35:10 +02003520 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003521 dev_err(DEV, "ReportCBitmap packet too large\n");
3522 goto out;
3523 }
3524			/* use the page buffer */
3525 p = buffer;
3526 memcpy(p, h, sizeof(*h));
Philipp Reisner02918be2010-08-20 14:35:10 +02003527 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003528 goto out;
Lars Ellenberg004352f2010-10-05 20:13:58 +02003529 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3530 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
Andreas Gruenbacher78fcbda2010-12-10 22:18:27 +01003531 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003532 }
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003533 err = decode_bitmap_c(mdev, p, &c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003534 } else {
Philipp Reisner02918be2010-08-20 14:35:10 +02003535 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003536 goto out;
3537 }
3538
Philipp Reisner02918be2010-08-20 14:35:10 +02003539 c.packets[cmd == P_BITMAP]++;
3540 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003541
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003542 if (err <= 0) {
3543 if (err < 0)
3544 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003545 break;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003546 }
Philipp Reisner02918be2010-08-20 14:35:10 +02003547 if (!drbd_recv_header(mdev, &cmd, &data_size))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003548 goto out;
Andreas Gruenbacher2c464072010-12-11 21:53:12 +01003549 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003550
3551 INFO_bm_xfer_stats(mdev, "receive", &c);
3552
3553 if (mdev->state.conn == C_WF_BITMAP_T) {
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003554 enum drbd_state_rv rv;
3555
Philipp Reisnerb411b362009-09-25 16:07:19 -07003556 ok = !drbd_send_bitmap(mdev);
3557 if (!ok)
3558 goto out;
3559 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
Andreas Gruenbacherde1f8e42010-12-10 21:04:00 +01003560 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3561 D_ASSERT(rv == SS_SUCCESS);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003562 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3563 /* admin may have requested C_DISCONNECTING,
3564 * other threads may have noticed network errors */
3565 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3566 drbd_conn_str(mdev->state.conn));
3567 }
3568
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003569 ok = true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003570 out:
Philipp Reisner37190942010-11-10 12:08:37 +01003571 /* drbd_bm_unlock(mdev); by intention no lock */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003572 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3573 drbd_start_resync(mdev, C_SYNC_SOURCE);
3574 free_page((unsigned long) buffer);
3575 return ok;
3576}
3577
Philipp Reisner02918be2010-08-20 14:35:10 +02003578static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003579{
3580 /* TODO zero copy sink :) */
3581 static char sink[128];
3582 int size, want, r;
3583
Philipp Reisner02918be2010-08-20 14:35:10 +02003584 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3585 cmd, data_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003586
Philipp Reisner02918be2010-08-20 14:35:10 +02003587 size = data_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003588 while (size > 0) {
3589 want = min_t(int, size, sizeof(sink));
3590 r = drbd_recv(mdev, sink, want);
3591 ERR_IF(r <= 0) break;
3592 size -= r;
3593 }
3594 return size == 0;
3595}
3596
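/*
 * Illustration only: draining an unknown payload through a small
 * scratch buffer, as receive_skip() does above. read_fn is a
 * hypothetical blocking read returning bytes received or <= 0 on
 * error, standing in for drbd_recv() in this standalone sketch.
 */
#if 0
static int drain(int (*read_fn)(void *buf, int len), int size)
{
	static char sink[128];

	while (size > 0) {
		int want = size < (int)sizeof(sink) ? size
						    : (int)sizeof(sink);
		int r = read_fn(sink, want);

		if (r <= 0)
			break;	/* connection error: give up */
		size -= r;
	}
	return size == 0;	/* 1 iff the whole payload was consumed */
}

static int fake_read(void *buf, int len)
{
	(void)buf;
	return len;	/* pretend we always receive what was asked for */
}

int main(void)
{
	return drain(fake_read, 1000) ? 0 : 1;
}
#endif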
Philipp Reisner02918be2010-08-20 14:35:10 +02003597static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003598{
Philipp Reisnerb411b362009-09-25 16:07:19 -07003599 /* Make sure we've acked all the TCP data associated
3600 * with the data requests being unplugged */
3601 drbd_tcp_quickack(mdev->data.socket);
3602
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003603 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003604}
3605
Philipp Reisner73a01a12010-10-27 14:33:00 +02003606static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3607{
3608 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3609
Lars Ellenbergf735e3632010-12-17 21:06:18 +01003610 switch (mdev->state.conn) {
3611 case C_WF_SYNC_UUID:
3612 case C_WF_BITMAP_T:
3613 case C_BEHIND:
3614 break;
3615 default:
3616 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3617 drbd_conn_str(mdev->state.conn));
3618 }
3619
Philipp Reisner73a01a12010-10-27 14:33:00 +02003620 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3621
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01003622 return true;
Philipp Reisner73a01a12010-10-27 14:33:00 +02003623}
3624
Philipp Reisner02918be2010-08-20 14:35:10 +02003625typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003626
Philipp Reisner02918be2010-08-20 14:35:10 +02003627struct data_cmd {
3628 int expect_payload;
3629 size_t pkt_size;
3630 drbd_cmd_handler_f function;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003631};
3632
Philipp Reisner02918be2010-08-20 14:35:10 +02003633static struct data_cmd drbd_cmd_handler[] = {
3634 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3635 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3636 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3637 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3638 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3639 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3640 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3641 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3642 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3643 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3644 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3645 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3646 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3647 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3648 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3649 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3650 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3651 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3652 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3653 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3654 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
Philipp Reisner73a01a12010-10-27 14:33:00 +02003655 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
Philipp Reisner02918be2010-08-20 14:35:10 +02003656 /* anything missing from this table is in
3657 * the asender_tbl, see get_asender_cmd */
3658 [P_MAX_CMD] = { 0, 0, NULL },
3659};
3660
3661/* All handler functions that expect a sub-header get that sub-header in
3662 mdev->data.rbuf.header.head.payload.
3663
3664   Usually the callback can find the usual p_header in mdev->data.rbuf.header.head,
3665   but it may not rely on that, since there is also p_header95! */
3666 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003667
3668static void drbdd(struct drbd_conf *mdev)
3669{
Philipp Reisner02918be2010-08-20 14:35:10 +02003670 union p_header *header = &mdev->data.rbuf.header;
3671 unsigned int packet_size;
3672 enum drbd_packets cmd;
3673 size_t shs; /* sub header size */
3674 int rv;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003675
3676 while (get_t_state(&mdev->receiver) == Running) {
3677 drbd_thread_current_set_cpu(mdev);
Philipp Reisner02918be2010-08-20 14:35:10 +02003678 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3679 goto err_out;
3680
3681 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3682 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3683 goto err_out;
Lars Ellenberg0b33a912009-11-16 15:58:04 +01003684 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003685
Philipp Reisner02918be2010-08-20 14:35:10 +02003686 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
Philipp Reisner02918be2010-08-20 14:35:10 +02003687 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3688 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3689 goto err_out;
3690 }
3691
Lars Ellenbergc13f7e12010-10-29 23:32:01 +02003692 if (shs) {
3693 rv = drbd_recv(mdev, &header->h80.payload, shs);
3694 if (unlikely(rv != shs)) {
3695 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3696 goto err_out;
3697 }
3698 }
3699
Philipp Reisner02918be2010-08-20 14:35:10 +02003700 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3701
3702 if (unlikely(!rv)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003703 dev_err(DEV, "error receiving %s, l: %d!\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003704 cmdname(cmd), packet_size);
3705 goto err_out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003706 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003707 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003708
Philipp Reisner02918be2010-08-20 14:35:10 +02003709 if (0) {
3710 err_out:
3711 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003712 }
Lars Ellenberg856c50c2010-10-14 13:37:40 +02003713 /* If we leave here, we probably want to update at least the
3714 * "Connected" indicator on stable storage. Do so explicitly here. */
3715 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003716}
3717
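/*
 * Illustration only: the table-driven dispatch used by drbdd() above,
 * reduced to two made-up packet types in a standalone sketch. Each
 * entry records the minimal packet size and whether extra payload is
 * legal, so the loop can reject malformed packets before calling the
 * handler. The header-size subtraction of the real code is simplified
 * away here.
 */
#if 0
#include <assert.h>
#include <stddef.h>

enum pkt { PKT_PING, PKT_DATA, PKT_MAX };

struct cmd {
	int expect_payload;
	size_t pkt_size;
	int (*fn)(unsigned payload_len);
};

static int got_ping(unsigned l) { (void)l; return 1; }
static int got_data(unsigned l) { return l > 0; }

static const struct cmd tbl[PKT_MAX] = {
	[PKT_PING] = { 0, 8, got_ping },
	[PKT_DATA] = { 1, 8, got_data },
};

static int dispatch(enum pkt cmd, unsigned packet_size)
{
	if (cmd >= PKT_MAX || !tbl[cmd].fn)
		return 0;		/* unknown packet type */
	if (packet_size > tbl[cmd].pkt_size && !tbl[cmd].expect_payload)
		return 0;		/* no payload expected */
	return tbl[cmd].fn(packet_size - tbl[cmd].pkt_size);
}

int main(void)
{
	assert(dispatch(PKT_PING, 8));
	assert(!dispatch(PKT_PING, 64));	/* stray payload rejected */
	assert(dispatch(PKT_DATA, 4104));
	return 0;
}
#endif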
3718void drbd_flush_workqueue(struct drbd_conf *mdev)
3719{
3720 struct drbd_wq_barrier barr;
3721
3722 barr.w.cb = w_prev_work_done;
3723 init_completion(&barr.done);
3724 drbd_queue_work(&mdev->data.work, &barr.w);
3725 wait_for_completion(&barr.done);
3726}
3727
Philipp Reisnerf70b35112010-06-24 14:34:40 +02003728void drbd_free_tl_hash(struct drbd_conf *mdev)
3729{
3730 struct hlist_head *h;
3731
3732 spin_lock_irq(&mdev->req_lock);
3733
3734 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3735 spin_unlock_irq(&mdev->req_lock);
3736 return;
3737 }
3738 /* paranoia code */
3739 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3740 if (h->first)
3741 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3742 (int)(h - mdev->ee_hash), h->first);
3743 kfree(mdev->ee_hash);
3744 mdev->ee_hash = NULL;
3745 mdev->ee_hash_s = 0;
3746
3747 /* paranoia code */
3748 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3749 if (h->first)
3750 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3751 (int)(h - mdev->tl_hash), h->first);
3752 kfree(mdev->tl_hash);
3753 mdev->tl_hash = NULL;
3754 mdev->tl_hash_s = 0;
3755 spin_unlock_irq(&mdev->req_lock);
3756}
3757
Philipp Reisnerb411b362009-09-25 16:07:19 -07003758static void drbd_disconnect(struct drbd_conf *mdev)
3759{
3760 enum drbd_fencing_p fp;
3761 union drbd_state os, ns;
3762 int rv = SS_UNKNOWN_ERROR;
3763 unsigned int i;
3764
3765 if (mdev->state.conn == C_STANDALONE)
3766 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003767
3768 /* asender does not clean up anything. it must not interfere, either */
3769 drbd_thread_stop(&mdev->asender);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003770 drbd_free_sock(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003771
Philipp Reisner85719572010-07-21 10:20:17 +02003772 /* wait for current activity to cease. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07003773 spin_lock_irq(&mdev->req_lock);
3774 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3775 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3776 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3777 spin_unlock_irq(&mdev->req_lock);
3778
3779 /* We do not have data structures that would allow us to
3780 * get the rs_pending_cnt down to 0 again.
3781 * * On C_SYNC_TARGET we do not have any data structures describing
3782 * the pending RSDataRequest's we have sent.
3783 * * On C_SYNC_SOURCE there is no data structure that tracks
3784 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3785 * And no, it is not the sum of the reference counts in the
3786 * resync_LRU. The resync_LRU tracks the whole operation including
3787 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3788 * on the fly. */
3789 drbd_rs_cancel_all(mdev);
3790 mdev->rs_total = 0;
3791 mdev->rs_failed = 0;
3792 atomic_set(&mdev->rs_pending_cnt, 0);
3793 wake_up(&mdev->misc_wait);
3794
3795 /* make sure syncer is stopped and w_resume_next_sg queued */
3796 del_timer_sync(&mdev->resync_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003797 resync_timer_fn((unsigned long)mdev);
3798
Philipp Reisnerb411b362009-09-25 16:07:19 -07003799 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3800 * w_make_resync_request etc. which may still be on the worker queue
3801 * to be "canceled" */
3802 drbd_flush_workqueue(mdev);
3803
3804 /* This also does reclaim_net_ee(). If we do this too early, we might
3805 * miss some resync ee and pages.*/
3806 drbd_process_done_ee(mdev);
3807
3808 kfree(mdev->p_uuid);
3809 mdev->p_uuid = NULL;
3810
Philipp Reisnerfb22c402010-09-08 23:20:21 +02003811 if (!is_susp(mdev->state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003812 tl_clear(mdev);
3813
Philipp Reisnerb411b362009-09-25 16:07:19 -07003814 dev_info(DEV, "Connection closed\n");
3815
3816 drbd_md_sync(mdev);
3817
3818 fp = FP_DONT_CARE;
3819 if (get_ldev(mdev)) {
Lars Ellenberg79a30d22011-01-20 10:32:05 +01003820 drbd_bitmap_io(mdev, &drbd_bm_write, "write from disconnect");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003821 fp = mdev->ldev->dc.fencing;
3822 put_ldev(mdev);
3823 }
3824
Philipp Reisner87f7be42010-06-11 13:56:33 +02003825 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3826 drbd_try_outdate_peer_async(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003827
3828 spin_lock_irq(&mdev->req_lock);
3829 os = mdev->state;
3830 if (os.conn >= C_UNCONNECTED) {
3831 /* Do not restart in case we are C_DISCONNECTING */
3832 ns = os;
3833 ns.conn = C_UNCONNECTED;
3834 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3835 }
3836 spin_unlock_irq(&mdev->req_lock);
3837
3838 if (os.conn == C_DISCONNECTING) {
Philipp Reisner84dfb9f2010-06-23 11:20:05 +02003839 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003840
Philipp Reisnerb411b362009-09-25 16:07:19 -07003841 crypto_free_hash(mdev->cram_hmac_tfm);
3842 mdev->cram_hmac_tfm = NULL;
3843
3844 kfree(mdev->net_conf);
3845 mdev->net_conf = NULL;
3846 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3847 }
3848
3849 /* tcp_close and release of sendpage pages can be deferred. I don't
3850 * want to use SO_LINGER, because apparently it can be deferred for
3851 * more than 20 seconds (longest time I checked).
3852 *
3853 * Actually we don't care for exactly when the network stack does its
3854 * put_page(), but release our reference on these pages right here.
3855 */
3856 i = drbd_release_ee(mdev, &mdev->net_ee);
3857 if (i)
3858 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
Lars Ellenberg435f0742010-09-06 12:30:25 +02003859 i = atomic_read(&mdev->pp_in_use_by_net);
3860 if (i)
3861 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003862 i = atomic_read(&mdev->pp_in_use);
3863 if (i)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02003864 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003865
3866 D_ASSERT(list_empty(&mdev->read_ee));
3867 D_ASSERT(list_empty(&mdev->active_ee));
3868 D_ASSERT(list_empty(&mdev->sync_ee));
3869 D_ASSERT(list_empty(&mdev->done_ee));
3870
3871 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3872 atomic_set(&mdev->current_epoch->epoch_size, 0);
3873 D_ASSERT(list_empty(&mdev->current_epoch->list));
3874}
3875
3876/*
3877 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3878 * we can agree on is stored in agreed_pro_version.
3879 *
3880 * feature flags and the reserved array should be enough room for future
3881 * enhancements of the handshake protocol, and possible plugins...
3882 *
3883 * for now, they are expected to be zero, but ignored.
3884 */
3885static int drbd_send_handshake(struct drbd_conf *mdev)
3886{
3887 /* ASSERT current == mdev->receiver ... */
3888 struct p_handshake *p = &mdev->data.sbuf.handshake;
3889 int ok;
3890
3891 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3892 dev_err(DEV, "interrupted during initial handshake\n");
3893 return 0; /* interrupted. not ok. */
3894 }
3895
3896 if (mdev->data.socket == NULL) {
3897 mutex_unlock(&mdev->data.mutex);
3898 return 0;
3899 }
3900
3901 memset(p, 0, sizeof(*p));
3902 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3903 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3904 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
Philipp Reisner0b70a132010-08-20 13:36:10 +02003905 (struct p_header80 *)p, sizeof(*p), 0 );
Philipp Reisnerb411b362009-09-25 16:07:19 -07003906 mutex_unlock(&mdev->data.mutex);
3907 return ok;
3908}
3909
3910/*
3911 * return values:
3912 * 1 yes, we have a valid connection
3913 * 0 oops, did not work out, please try again
3914 * -1 peer talks different language,
3915 * no point in trying again, please go standalone.
3916 */
3917static int drbd_do_handshake(struct drbd_conf *mdev)
3918{
3919 /* ASSERT current == mdev->receiver ... */
3920 struct p_handshake *p = &mdev->data.rbuf.handshake;
Philipp Reisner02918be2010-08-20 14:35:10 +02003921 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3922 unsigned int length;
3923 enum drbd_packets cmd;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003924 int rv;
3925
3926 rv = drbd_send_handshake(mdev);
3927 if (!rv)
3928 return 0;
3929
Philipp Reisner02918be2010-08-20 14:35:10 +02003930 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003931 if (!rv)
3932 return 0;
3933
Philipp Reisner02918be2010-08-20 14:35:10 +02003934 if (cmd != P_HAND_SHAKE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003935 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003936 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003937 return -1;
3938 }
3939
Philipp Reisner02918be2010-08-20 14:35:10 +02003940 if (length != expect) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003941 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02003942 expect, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003943 return -1;
3944 }
3945
3946 rv = drbd_recv(mdev, &p->head.payload, expect);
3947
3948 if (rv != expect) {
3949 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3950 return 0;
3951 }
3952
Philipp Reisnerb411b362009-09-25 16:07:19 -07003953 p->protocol_min = be32_to_cpu(p->protocol_min);
3954 p->protocol_max = be32_to_cpu(p->protocol_max);
3955 if (p->protocol_max == 0)
3956 p->protocol_max = p->protocol_min;
3957
3958 if (PRO_VERSION_MAX < p->protocol_min ||
3959 PRO_VERSION_MIN > p->protocol_max)
3960 goto incompat;
3961
3962 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3963
3964 dev_info(DEV, "Handshake successful: "
3965 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3966
3967 return 1;
3968
3969 incompat:
3970 dev_err(DEV, "incompatible DRBD dialects: "
3971 "I support %d-%d, peer supports %d-%d\n",
3972 PRO_VERSION_MIN, PRO_VERSION_MAX,
3973 p->protocol_min, p->protocol_max);
3974 return -1;
3975}
3976
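/*
 * Illustration only: the protocol version negotiation performed by
 * drbd_do_handshake() above. The ranges are compatible iff they
 * overlap, and the agreed version is the highest one both sides
 * support. MY_MIN/MY_MAX are stand-ins for PRO_VERSION_MIN/MAX.
 */
#if 0
#include <assert.h>

#define MY_MIN 86
#define MY_MAX 96	/* illustrative values only */

/* returns the agreed version, or -1 if the ranges do not overlap */
static int agree_version(int peer_min, int peer_max)
{
	if (peer_max == 0)
		peer_max = peer_min;	/* very old peers send max == 0 */
	if (MY_MAX < peer_min || MY_MIN > peer_max)
		return -1;
	return MY_MAX < peer_max ? MY_MAX : peer_max;
}

int main(void)
{
	assert(agree_version(86, 94) == 94);
	assert(agree_version(95, 0) == 95);
	assert(agree_version(97, 99) == -1);	/* peer too new */
	return 0;
}
#endif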
3977#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3978static int drbd_do_auth(struct drbd_conf *mdev)
3979{
3980	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3981 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01003982 return -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003983}
3984#else
3985#define CHALLENGE_LEN 64
Johannes Thomab10d96c2010-01-07 16:02:50 +01003986
3987/* Return value:
3988 1 - auth succeeded,
3989 0 - failed, try again (network error),
3990 -1 - auth failed, don't try again.
3991*/
3992
Philipp Reisnerb411b362009-09-25 16:07:19 -07003993static int drbd_do_auth(struct drbd_conf *mdev)
3994{
3995 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3996 struct scatterlist sg;
3997 char *response = NULL;
3998 char *right_response = NULL;
3999 char *peers_ch = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004000 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4001 unsigned int resp_size;
4002 struct hash_desc desc;
Philipp Reisner02918be2010-08-20 14:35:10 +02004003 enum drbd_packets cmd;
4004 unsigned int length;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004005 int rv;
4006
4007 desc.tfm = mdev->cram_hmac_tfm;
4008 desc.flags = 0;
4009
4010 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4011 (u8 *)mdev->net_conf->shared_secret, key_len);
4012 if (rv) {
4013 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004014 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004015 goto fail;
4016 }
4017
4018 get_random_bytes(my_challenge, CHALLENGE_LEN);
4019
4020 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4021 if (!rv)
4022 goto fail;
4023
Philipp Reisner02918be2010-08-20 14:35:10 +02004024 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004025 if (!rv)
4026 goto fail;
4027
Philipp Reisner02918be2010-08-20 14:35:10 +02004028 if (cmd != P_AUTH_CHALLENGE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004029 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004030 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004031 rv = 0;
4032 goto fail;
4033 }
4034
Philipp Reisner02918be2010-08-20 14:35:10 +02004035 if (length > CHALLENGE_LEN * 2) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004036 dev_err(DEV, "expected AuthChallenge payload too big.\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004037 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004038 goto fail;
4039 }
4040
Philipp Reisner02918be2010-08-20 14:35:10 +02004041 peers_ch = kmalloc(length, GFP_NOIO);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004042 if (peers_ch == NULL) {
4043 dev_err(DEV, "kmalloc of peers_ch failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004044 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004045 goto fail;
4046 }
4047
Philipp Reisner02918be2010-08-20 14:35:10 +02004048 rv = drbd_recv(mdev, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004049
Philipp Reisner02918be2010-08-20 14:35:10 +02004050 if (rv != length) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004051 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4052 rv = 0;
4053 goto fail;
4054 }
4055
4056 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4057 response = kmalloc(resp_size, GFP_NOIO);
4058 if (response == NULL) {
4059 dev_err(DEV, "kmalloc of response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004060 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004061 goto fail;
4062 }
4063
4064 sg_init_table(&sg, 1);
Philipp Reisner02918be2010-08-20 14:35:10 +02004065 sg_set_buf(&sg, peers_ch, length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004066
4067 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4068 if (rv) {
4069 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004070 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004071 goto fail;
4072 }
4073
4074 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4075 if (!rv)
4076 goto fail;
4077
Philipp Reisner02918be2010-08-20 14:35:10 +02004078 rv = drbd_recv_header(mdev, &cmd, &length);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004079 if (!rv)
4080 goto fail;
4081
Philipp Reisner02918be2010-08-20 14:35:10 +02004082 if (cmd != P_AUTH_RESPONSE) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004083 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
Philipp Reisner02918be2010-08-20 14:35:10 +02004084 cmdname(cmd), cmd);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004085 rv = 0;
4086 goto fail;
4087 }
4088
Philipp Reisner02918be2010-08-20 14:35:10 +02004089 if (length != resp_size) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004090 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4091 rv = 0;
4092 goto fail;
4093 }
4094
4095 rv = drbd_recv(mdev, response , resp_size);
4096
4097 if (rv != resp_size) {
4098 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4099 rv = 0;
4100 goto fail;
4101 }
4102
4103 right_response = kmalloc(resp_size, GFP_NOIO);
Julia Lawall2d1ee872009-12-27 22:27:11 +01004104 if (right_response == NULL) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004105 dev_err(DEV, "kmalloc of right_response failed\n");
Johannes Thomab10d96c2010-01-07 16:02:50 +01004106 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004107 goto fail;
4108 }
4109
4110 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4111
4112 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4113 if (rv) {
4114 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004115 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004116 goto fail;
4117 }
4118
4119 rv = !memcmp(response, right_response, resp_size);
4120
4121 if (rv)
4122 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4123 resp_size, mdev->net_conf->cram_hmac_alg);
Johannes Thomab10d96c2010-01-07 16:02:50 +01004124 else
4125 rv = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004126
4127 fail:
4128 kfree(peers_ch);
4129 kfree(response);
4130 kfree(right_response);
4131
4132 return rv;
4133}
4134#endif
4135
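/*
 * Illustration only: the shape of the CRAM challenge-response exchange
 * in drbd_do_auth() above. A toy mixing function stands in for the
 * real HMAC computed via crypto_hash_digest(); everything in this
 * standalone sketch is hypothetical and shows structure only, not
 * cryptography.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* NOT cryptographic -- a placeholder for the keyed hash */
static uint64_t toy_mac(const char *key, const uint8_t *msg, size_t len)
{
	uint64_t h = 1469598103934665603ULL;
	size_t i;

	for (i = 0; key[i]; i++)
		h = (h ^ (uint8_t)key[i]) * 1099511628211ULL;
	for (i = 0; i < len; i++)
		h = (h ^ msg[i]) * 1099511628211ULL;
	return h;
}

int main(void)
{
	const char *secret = "shared-secret";
	uint8_t challenge[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* random in reality */

	/* peer computes a response over my challenge with the shared key */
	uint64_t response = toy_mac(secret, challenge, sizeof(challenge));
	/* I compute the expected value locally and compare */
	uint64_t expected = toy_mac(secret, challenge, sizeof(challenge));

	assert(memcmp(&response, &expected, sizeof(response)) == 0);
	return 0;
}
#endif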
4136int drbdd_init(struct drbd_thread *thi)
4137{
4138 struct drbd_conf *mdev = thi->mdev;
4139 unsigned int minor = mdev_to_minor(mdev);
4140 int h;
4141
4142 sprintf(current->comm, "drbd%d_receiver", minor);
4143
4144 dev_info(DEV, "receiver (re)started\n");
4145
4146 do {
4147 h = drbd_connect(mdev);
4148 if (h == 0) {
4149 drbd_disconnect(mdev);
Philipp Reisner20ee6392011-01-18 15:28:59 +01004150 schedule_timeout_interruptible(HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004151 }
4152 if (h == -1) {
4153 dev_warn(DEV, "Discarding network configuration.\n");
4154 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4155 }
4156 } while (h == 0);
4157
4158 if (h > 0) {
4159 if (get_net_conf(mdev)) {
4160 drbdd(mdev);
4161 put_net_conf(mdev);
4162 }
4163 }
4164
4165 drbd_disconnect(mdev);
4166
4167 dev_info(DEV, "receiver terminated\n");
4168 return 0;
4169}
4170
4171/* ********* acknowledge sender ******** */
4172
Philipp Reisner0b70a132010-08-20 13:36:10 +02004173static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004174{
4175 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4176
4177 int retcode = be32_to_cpu(p->retcode);
4178
4179 if (retcode >= SS_SUCCESS) {
4180 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4181 } else {
4182 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4183 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4184 drbd_set_st_err_str(retcode), retcode);
4185 }
4186 wake_up(&mdev->state_wait);
4187
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004188 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004189}
4190
Philipp Reisner0b70a132010-08-20 13:36:10 +02004191static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004192{
4193 return drbd_send_ping_ack(mdev);
4194
4195}
4196
Philipp Reisner0b70a132010-08-20 13:36:10 +02004197static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004198{
4199 /* restore idle timeout */
4200 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
Philipp Reisner309d1602010-03-02 15:03:44 +01004201 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4202 wake_up(&mdev->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004203
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004204 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004205}
4206
Philipp Reisner0b70a132010-08-20 13:36:10 +02004207static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004208{
4209 struct p_block_ack *p = (struct p_block_ack *)h;
4210 sector_t sector = be64_to_cpu(p->sector);
4211 int blksize = be32_to_cpu(p->blksize);
4212
4213 D_ASSERT(mdev->agreed_pro_version >= 89);
4214
4215 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4216
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004217 if (get_ldev(mdev)) {
4218 drbd_rs_complete_io(mdev, sector);
4219 drbd_set_in_sync(mdev, sector, blksize);
4220 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4221 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4222 put_ldev(mdev);
4223 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004224 dec_rs_pending(mdev);
Philipp Reisner778f2712010-07-06 11:14:00 +02004225 atomic_add(blksize >> 9, &mdev->rs_sect_in);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004226
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004227 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004228}
4229
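/*
 * Illustration only: the unit conversions in got_IsInSync() above. A
 * block size in bytes becomes a sector count via >> 9 (512-byte
 * sectors) and a bitmap-block count via >> BM_BLOCK_SHIFT; the 4 KiB
 * bitmap granularity assumed here is an assumption of this sketch.
 */
#if 0
#include <assert.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors */
#define BM_BLOCK_SHIFT_	12	/* assumed 4 KiB per bitmap bit */

int main(void)
{
	int blksize = 32768;	/* one 32 KiB resync block */

	assert((blksize >> SECTOR_SHIFT) == 64);	/* sectors */
	assert((blksize >> BM_BLOCK_SHIFT_) == 8);	/* bitmap bits */
	return 0;
}
#endif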
4230/* when we receive the ACK for a write request,
4231 * verify that we actually know about it */
4232static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4233 u64 id, sector_t sector)
4234{
4235 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4236 struct hlist_node *n;
4237 struct drbd_request *req;
4238
4239 hlist_for_each_entry(req, n, slot, colision) {
4240 if ((unsigned long)req == (unsigned long)id) {
4241 if (req->sector != sector) {
4242 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4243 "wrong sector (%llus versus %llus)\n", req,
4244 (unsigned long long)req->sector,
4245 (unsigned long long)sector);
4246 break;
4247 }
4248 return req;
4249 }
4250 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004251 return NULL;
4252}
4253
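/*
 * Illustration only: using the request pointer itself as the wire id,
 * as _ack_id_to_req() above does, and double-checking the sector on
 * the way back. Simplified to a single linked list in this standalone
 * sketch; the real code hashes by sector first.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

struct req { struct req *next; uint64_t sector; };

static struct req *ack_id_to_req(struct req *slot, uint64_t id,
				 uint64_t sector)
{
	struct req *r;

	for (r = slot; r; r = r->next)
		if ((uint64_t)(uintptr_t)r == id)
			return r->sector == sector ? r : NULL;
	return NULL;
}

int main(void)
{
	struct req a = { NULL, 128 }, b = { &a, 4096 };
	uint64_t id = (uint64_t)(uintptr_t)&a;	/* what went on the wire */

	assert(ack_id_to_req(&b, id, 128) == &a);
	assert(ack_id_to_req(&b, id, 999) == NULL);	/* sector mismatch */
	return 0;
}
#endif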
4254typedef struct drbd_request *(req_validator_fn)
4255 (struct drbd_conf *mdev, u64 id, sector_t sector);
4256
4257static int validate_req_change_req_state(struct drbd_conf *mdev,
4258 u64 id, sector_t sector, req_validator_fn validator,
4259 const char *func, enum drbd_req_event what)
4260{
4261 struct drbd_request *req;
4262 struct bio_and_error m;
4263
4264 spin_lock_irq(&mdev->req_lock);
4265 req = validator(mdev, id, sector);
4266 if (unlikely(!req)) {
4267 spin_unlock_irq(&mdev->req_lock);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004268
4269 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
4270 (void *)(unsigned long)id, (unsigned long long)sector);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004271 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004272 }
4273 __req_mod(req, what, &m);
4274 spin_unlock_irq(&mdev->req_lock);
4275
4276 if (m.bio)
4277 complete_master_bio(mdev, &m);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004278 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004279}
4280
Philipp Reisner0b70a132010-08-20 13:36:10 +02004281static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004282{
4283 struct p_block_ack *p = (struct p_block_ack *)h;
4284 sector_t sector = be64_to_cpu(p->sector);
4285 int blksize = be32_to_cpu(p->blksize);
4286 enum drbd_req_event what;
4287
4288 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4289
4290 if (is_syncer_block_id(p->block_id)) {
4291 drbd_set_in_sync(mdev, sector, blksize);
4292 dec_rs_pending(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004293 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004294 }
4295 switch (be16_to_cpu(h->command)) {
4296 case P_RS_WRITE_ACK:
4297 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4298 what = write_acked_by_peer_and_sis;
4299 break;
4300 case P_WRITE_ACK:
4301 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4302 what = write_acked_by_peer;
4303 break;
4304 case P_RECV_ACK:
4305 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4306 what = recv_acked_by_peer;
4307 break;
4308 case P_DISCARD_ACK:
4309 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4310 what = conflict_discarded_by_peer;
4311 break;
4312 default:
4313 D_ASSERT(0);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004314 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004315 }
4316
4317 return validate_req_change_req_state(mdev, p->block_id, sector,
4318 _ack_id_to_req, __func__ , what);
4319}
4320
Philipp Reisner0b70a132010-08-20 13:36:10 +02004321static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004322{
4323 struct p_block_ack *p = (struct p_block_ack *)h;
4324 sector_t sector = be64_to_cpu(p->sector);
Philipp Reisner2deb8332011-01-17 18:39:18 +01004325 int size = be32_to_cpu(p->blksize);
4326 struct drbd_request *req;
4327 struct bio_and_error m;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004328
4329 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4330
4331 if (is_syncer_block_id(p->block_id)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004332 dec_rs_pending(mdev);
4333 drbd_rs_failed_io(mdev, sector, size);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004334 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004335 }
Philipp Reisner2deb8332011-01-17 18:39:18 +01004336
4337 spin_lock_irq(&mdev->req_lock);
4338 req = _ack_id_to_req(mdev, p->block_id, sector);
4339 if (!req) {
4340 spin_unlock_irq(&mdev->req_lock);
4341 if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
4342 mdev->net_conf->wire_protocol == DRBD_PROT_B) {
4343 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4344 The master bio might already be completed, therefore the
4345 request is no longer in the collision hash.
4346 => Do not try to validate block_id as request. */
4347 /* In Protocol B we might already have got a P_RECV_ACK
4348			   but then get a P_NEG_ACK afterwards. */
4349 drbd_set_out_of_sync(mdev, sector, size);
4350 return true;
4351 } else {
4352 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
4353 (void *)(unsigned long)p->block_id, (unsigned long long)sector);
4354 return false;
4355 }
4356 }
4357 __req_mod(req, neg_acked, &m);
4358 spin_unlock_irq(&mdev->req_lock);
4359
4360 if (m.bio)
4361 complete_master_bio(mdev, &m);
4362 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004363}
4364
Philipp Reisner0b70a132010-08-20 13:36:10 +02004365static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004366{
4367 struct p_block_ack *p = (struct p_block_ack *)h;
4368 sector_t sector = be64_to_cpu(p->sector);
4369
4370 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4371 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4372 (unsigned long long)sector, be32_to_cpu(p->blksize));
4373
4374 return validate_req_change_req_state(mdev, p->block_id, sector,
4375 _ar_id_to_req, __func__ , neg_acked);
4376}
4377
Philipp Reisner0b70a132010-08-20 13:36:10 +02004378static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004379{
4380 sector_t sector;
4381 int size;
4382 struct p_block_ack *p = (struct p_block_ack *)h;
4383
4384 sector = be64_to_cpu(p->sector);
4385 size = be32_to_cpu(p->blksize);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004386
4387 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4388
4389 dec_rs_pending(mdev);
4390
4391 if (get_ldev_if_state(mdev, D_FAILED)) {
4392 drbd_rs_complete_io(mdev, sector);
Philipp Reisnerd612d302010-12-27 10:53:28 +01004393 switch (be16_to_cpu(h->command)) {
4394 case P_NEG_RS_DREPLY:
4395 drbd_rs_failed_io(mdev, sector, size);
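			/* fall through */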
4396 case P_RS_CANCEL:
4397 break;
4398 default:
4399 D_ASSERT(0);
4400 put_ldev(mdev);
4401 return false;
4402 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07004403 put_ldev(mdev);
4404 }
4405
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004406 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004407}
4408
Philipp Reisner0b70a132010-08-20 13:36:10 +02004409static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004410{
4411 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4412
4413 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4414
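	/* If we are Ahead of the peer and all application writes have
	 * drained, schedule the transition back to a real resync; the
	 * test_and_set_bit ensures the timer is armed only once. */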
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004415 if (mdev->state.conn == C_AHEAD &&
4416 atomic_read(&mdev->ap_in_flight) == 0 &&
Philipp Reisner370a43e2011-01-14 16:03:11 +01004417	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
4418 mdev->start_resync_timer.expires = jiffies + HZ;
4419 add_timer(&mdev->start_resync_timer);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02004420 }
4421
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004422 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004423}
4424
Philipp Reisner0b70a132010-08-20 13:36:10 +02004425static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisnerb411b362009-09-25 16:07:19 -07004426{
4427 struct p_block_ack *p = (struct p_block_ack *)h;
4428 struct drbd_work *w;
4429 sector_t sector;
4430 int size;
4431
4432 sector = be64_to_cpu(p->sector);
4433 size = be32_to_cpu(p->blksize);
4434
4435 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4436
4437 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4438 drbd_ov_oos_found(mdev, sector, size);
4439 else
4440 ov_oos_print(mdev);
4441
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004442 if (!get_ldev(mdev))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004443 return true;
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004444
Philipp Reisnerb411b362009-09-25 16:07:19 -07004445 drbd_rs_complete_io(mdev, sector);
4446 dec_rs_pending(mdev);
4447
Lars Ellenbergea5442a2010-11-05 09:48:01 +01004448 --mdev->ov_left;
4449
4450 /* let's advance progress step marks only for every other megabyte */
4451 if ((mdev->ov_left & 0x200) == 0x200)
4452 drbd_advance_rs_marks(mdev, mdev->ov_left);
4453
4454 if (mdev->ov_left == 0) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07004455 w = kmalloc(sizeof(*w), GFP_NOIO);
4456 if (w) {
4457 w->cb = w_ov_finished;
4458 drbd_queue_work_front(&mdev->data.work, w);
4459 } else {
4460			dev_err(DEV, "kmalloc(w) failed.\n");
4461 ov_oos_print(mdev);
4462 drbd_resync_finished(mdev);
4463 }
4464 }
Lars Ellenberg1d53f092010-09-05 01:13:24 +02004465 put_ldev(mdev);
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004466 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004467}
4468
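/* Handler for packets we consume but deliberately ignore, e.g.
 * P_DELAY_PROBE (see the asender table below). */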
Philipp Reisner02918be2010-08-20 14:35:10 +02004469static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004470{
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01004471 return true;
Philipp Reisner0ced55a2010-04-30 15:26:20 +02004472}
4473
Philipp Reisnerb411b362009-09-25 16:07:19 -07004474struct asender_cmd {
4475 size_t pkt_size;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004476 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004477};
4478
4479static struct asender_cmd *get_asender_cmd(int cmd)
4480{
4481 static struct asender_cmd asender_tbl[] = {
4482 /* anything missing from this table is in
4483 * the drbd_cmd_handler (drbd_default_handler) table,
4484 * see the beginning of drbdd() */
Philipp Reisner0b70a132010-08-20 13:36:10 +02004485 [P_PING] = { sizeof(struct p_header80), got_Ping },
4486 [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
Philipp Reisnerb411b362009-09-25 16:07:19 -07004487 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4488 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4489 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4490 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4491 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4492 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4493 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4494 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4495 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4496 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4497 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
Philipp Reisner02918be2010-08-20 14:35:10 +02004498 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
Philipp Reisnerd612d302010-12-27 10:53:28 +01004499 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
Philipp Reisnerb411b362009-09-25 16:07:19 -07004500 [P_MAX_CMD] = { 0, NULL },
4501 };
4502 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4503 return NULL;
4504 return &asender_tbl[cmd];
4505}
4506
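/* The asender thread owns the meta socket: it sends pings and
 * receives the small, fixed size ACK packets, dispatching them
 * through the table above. */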
4507int drbd_asender(struct drbd_thread *thi)
4508{
4509 struct drbd_conf *mdev = thi->mdev;
Philipp Reisner02918be2010-08-20 14:35:10 +02004510 struct p_header80 *h = &mdev->meta.rbuf.header.h80;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004511 struct asender_cmd *cmd = NULL;
4512
4513 int rv, len;
4514 void *buf = h;
4515 int received = 0;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004516 int expect = sizeof(struct p_header80);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004517 int empty;
4518
4519 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4520
4521 current->policy = SCHED_RR; /* Make this a realtime task! */
4522 current->rt_priority = 2; /* more important than all other tasks */
4523
4524 while (get_t_state(thi) == Running) {
4525 drbd_thread_current_set_cpu(mdev);
4526 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4527 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4528 mdev->meta.socket->sk->sk_rcvtimeo =
4529 mdev->net_conf->ping_timeo*HZ/10;
4530 }
4531
4532 /* conditionally cork;
4533 * it may hurt latency if we cork without much to send */
4534 if (!mdev->net_conf->no_cork &&
4535 3 < atomic_read(&mdev->unacked_cnt))
4536 drbd_tcp_cork(mdev->meta.socket);
4537 while (1) {
4538 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4539 flush_signals(current);
Lars Ellenberg0f8488e2010-10-13 18:19:23 +02004540 if (!drbd_process_done_ee(mdev))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004541 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004542 /* to avoid race with newly queued ACKs */
4543 set_bit(SIGNAL_ASENDER, &mdev->flags);
4544 spin_lock_irq(&mdev->req_lock);
4545 empty = list_empty(&mdev->done_ee);
4546 spin_unlock_irq(&mdev->req_lock);
4547 /* new ack may have been queued right here,
4548 * but then there is also a signal pending,
4549 * and we start over... */
4550 if (empty)
4551 break;
4552 }
4553 /* but unconditionally uncork unless disabled */
4554 if (!mdev->net_conf->no_cork)
4555 drbd_tcp_uncork(mdev->meta.socket);
4556
4557 /* short circuit, recv_msg would return EINTR anyways. */
4558 if (signal_pending(current))
4559 continue;
4560
4561 rv = drbd_recv_short(mdev, mdev->meta.socket,
4562 buf, expect-received, 0);
4563 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4564
4565 flush_signals(current);
4566
4567 /* Note:
4568 * -EINTR (on meta) we got a signal
4569 * -EAGAIN (on meta) rcvtimeo expired
4570 * -ECONNRESET other side closed the connection
4571 * -ERESTARTSYS (on data) we got a signal
4572 * rv < 0 other than above: unexpected error!
4573 * rv == expected: full header or command
4574 * rv < expected: "woken" by signal during receive
4575 * rv == 0 : "connection shut down by peer"
4576 */
4577 if (likely(rv > 0)) {
4578 received += rv;
4579 buf += rv;
4580 } else if (rv == 0) {
4581 dev_err(DEV, "meta connection shut down by peer.\n");
4582 goto reconnect;
4583 } else if (rv == -EAGAIN) {
4584 if (mdev->meta.socket->sk->sk_rcvtimeo ==
4585 mdev->net_conf->ping_timeo*HZ/10) {
4586 dev_err(DEV, "PingAck did not arrive in time.\n");
4587 goto reconnect;
4588 }
4589 set_bit(SEND_PING, &mdev->flags);
4590 continue;
4591 } else if (rv == -EINTR) {
4592 continue;
4593 } else {
4594 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4595 goto reconnect;
4596 }
4597
4598 if (received == expect && cmd == NULL) {
4599 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
Lars Ellenberg004352f2010-10-05 20:13:58 +02004600 dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
4601 be32_to_cpu(h->magic),
4602 be16_to_cpu(h->command),
4603 be16_to_cpu(h->length));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004604 goto reconnect;
4605 }
4606 cmd = get_asender_cmd(be16_to_cpu(h->command));
4607 len = be16_to_cpu(h->length);
4608 if (unlikely(cmd == NULL)) {
Lars Ellenberg004352f2010-10-05 20:13:58 +02004609 dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
4610 be32_to_cpu(h->magic),
4611 be16_to_cpu(h->command),
4612 be16_to_cpu(h->length));
Philipp Reisnerb411b362009-09-25 16:07:19 -07004613 goto disconnect;
4614 }
4615 expect = cmd->pkt_size;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004616 ERR_IF(len != expect-sizeof(struct p_header80))
Philipp Reisnerb411b362009-09-25 16:07:19 -07004617 goto reconnect;
Philipp Reisnerb411b362009-09-25 16:07:19 -07004618 }
4619 if (received == expect) {
4620 D_ASSERT(cmd != NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004621 if (!cmd->process(mdev, h))
4622 goto reconnect;
4623
4624 buf = h;
4625 received = 0;
Philipp Reisner0b70a132010-08-20 13:36:10 +02004626 expect = sizeof(struct p_header80);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004627 cmd = NULL;
4628 }
4629 }
4630
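	/* Error exits: reachable only through the goto labels above;
	 * the if (0) keeps the regular path from falling into them. */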
4631 if (0) {
4632reconnect:
4633 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004634 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004635 }
4636 if (0) {
4637disconnect:
4638 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
Lars Ellenberg856c50c2010-10-14 13:37:40 +02004639 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07004640 }
4641 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4642
4643 D_ASSERT(mdev->state.conn < C_CONNECTED);
4644 dev_info(DEV, "asender terminated\n");
4645
4646 return 0;
4647}