/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameters, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;	/* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

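/* Allocate a single-vec bio for DRBD meta data I/O; falls back to a plain
 * bio_alloc() as long as the dedicated drbd_md_io_bio_set is not set up. */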
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 * attached.
 */
static int tl_init(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);
	INIT_LIST_HEAD(&tconn->barrier_acked_requests);

	return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn:	DRBD connection.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = tconn->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch object, this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);

	b = tconn->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and would violate write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	   */
	list_splice_init(&b->requests, &tconn->barrier_acked_requests);
	mdev = b->w.mdev;

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(tconn, b);
		if (nob)
			tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ)  >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == RESEND) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(b->w.mdev);
					set_bit(CREATE_BARRIER, &b->w.mdev->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
				if (tmp != NULL)
					conn_err(tconn, "ASSERT FAILED tmp == NULL");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}

	/* Actions operating on the disk state, also want to work on
	   requests that got barrier acked. */
	switch (what) {
	case FAIL_FROZEN_DISK_IO:
	case RESTART_FROZEN_DISK_IO:
		list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			_req_mod(req, what);
		}
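		/* fall through */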
	case CONNECTION_LOST_WHILE_PENDING:
	case RESEND:
		break;
	default:
		conn_err(tconn, "what = %d in _tl_restart()\n", what);
	}
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	struct list_head *le, *tle;
	struct drbd_request *r;
	int vnr;

	spin_lock_irq(&tconn->req_lock);

	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);
	}

	/* ensure bit indicating barrier is required is clear */
	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		clear_bit(CREATE_BARRIER, &mdev->flags);
	rcu_read_unlock();

	spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_tl_epoch *b;
	struct list_head *le, *tle;
	struct drbd_request *req;

	spin_lock_irq(&tconn->req_lock);
	b = tconn->oldest_tle;
	while (b) {
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			if (req->w.mdev == mdev)
				_req_mod(req, ABORT_DISK_IO);
		}
		b = b->next;
	}

	list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
		req = list_entry(le, struct drbd_request, tl_requests);
		if (req->w.mdev == mdev)
			_req_mod(req, ABORT_DISK_IO);
	}

	spin_unlock_irq(&tconn->req_lock);
}

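/* Common kthread body for the receiver, worker and asender threads: runs
 * thi->function and transparently restarts it while t_state is RESTARTING. */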
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}

void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

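/* Return the minor number of the connection's first volume (lowest vnr),
 * or -1 if this connection currently has no volumes. */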
int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

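/* Dispatch on the agreed protocol version: protocol 100 headers carry a
 * volume number, the "95" header allows 32 bit payload lengths, and the
 * old "80" header is limited to 16 bit lengths. */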
static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}

static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

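/* On success the socket mutex stays locked until the corresponding
 * conn_send_command() / drbd_send_command() releases it; on failure it
 * is unlocked again before returning NULL. */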
void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

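/* Assemble the wire header in front of the payload in the socket's send
 * buffer and push header and (optional) data onto the socket. */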
static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

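/* The sync parameter packet grew over the protocol versions: up to apv 87 a
 * fixed-size p_rs_param, apv 88 appends the verify_alg string, apv 89-94 use
 * p_rs_param_89 (which adds csums_alg), and apv >= 95 p_rs_param_95 with the
 * dynamic resync controller settings. */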
int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);

	return err;
}

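/* Send our generation UUIDs plus the bitmap weight. The extra UI_FLAGS slot
 * encodes: bit 0 discard-my-data, bit 1 we were crashed primary, bit 2 the
 * tentative new disk state is D_INCONSISTENT; drbd_send_uuids_skip_initial_sync()
 * additionally passes 8 to flag "skip initial sync". */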
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

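/* Report our disk sizes and I/O limits to the peer (P_SIZES). With
 * trigger_reply set, the current capacity is sent as 0. */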
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	if (mdev->tconn->agreed_pro_version <= 94)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (mdev->tconn->agreed_pro_version < 100)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001157int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001158{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001159 struct drbd_socket *sock;
1160 struct p_req_state *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001161
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001162 sock = &mdev->tconn->data;
1163 p = drbd_prepare_command(mdev, sock);
1164 if (!p)
1165 return -EIO;
1166 p->mask = cpu_to_be32(mask.i);
1167 p->val = cpu_to_be32(val.i);
1168 return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001169
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001170}
1171
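/**
 * conn_send_state_req() - Sends a connection wide state change request
 * @tconn:	DRBD connection.
 * @mask:	Bits of the state to be changed.
 * @val:	New values of the masked state bits.
 *
 * Uses P_CONN_ST_CHG_REQ for peers speaking protocol 100 or newer,
 * P_STATE_CHG_REQ otherwise.
 */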
1172int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
1173{
1174 enum drbd_packet cmd;
1175 struct drbd_socket *sock;
1176 struct p_req_state *p;
1177
1178 cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1179 sock = &tconn->data;
1180 p = conn_prepare_command(tconn, sock);
1181 if (!p)
1182 return -EIO;
1183 p->mask = cpu_to_be32(mask.i);
1184 p->val = cpu_to_be32(val.i);
1185 return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001186}
1187
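/**
 * drbd_send_sr_reply() - Sends the reply to a state change request
 * @mdev:	DRBD device.
 * @retcode:	Result of the local state change.
 */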
Andreas Gruenbacher2f4e7ab2011-03-16 01:20:38 +01001188void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001189{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001190 struct drbd_socket *sock;
1191 struct p_req_state_reply *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001192
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001193 sock = &mdev->tconn->meta;
1194 p = drbd_prepare_command(mdev, sock);
1195 if (p) {
1196 p->retcode = cpu_to_be32(retcode);
1197 drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1198 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001199}
1200
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001201void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
Philipp Reisner047cd4a2011-02-15 11:09:33 +01001202{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001203 struct drbd_socket *sock;
1204 struct p_req_state_reply *p;
Philipp Reisner047cd4a2011-02-15 11:09:33 +01001205 enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1206
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001207 sock = &tconn->meta;
1208 p = conn_prepare_command(tconn, sock);
1209 if (p) {
1210 p->retcode = cpu_to_be32(retcode);
1211 conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
1212 }
Philipp Reisner047cd4a2011-02-15 11:09:33 +01001213}
1214
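/* Layout of the p_compressed_bm 'encoding' byte, as assembled by the
 * dcbp_*() helpers below:
 *   bit  7	: whether the first run length describes set bits
 *   bits 6..4	: number of unused pad bits at the end of the bit stream
 *   bits 3..0	: bitmap encoding code (enum drbd_bitmap_code)
 * A receiver recovers the fields accordingly:
 *   code     =  encoding & 0x0f;
 *   pad_bits = (encoding >> 4) & 0x7;
 *   start    = (encoding & 0x80) != 0;
 */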
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001215static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1216{
1217 BUG_ON(code & ~0xf);
1218 p->encoding = (p->encoding & ~0xf) | code;
1219}
1220
1221static void dcbp_set_start(struct p_compressed_bm *p, int set)
1222{
1223 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1224}
1225
1226static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1227{
1228 BUG_ON(n & ~0x7);
1229 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1230}
1231
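/**
 * fill_bitmap_rle_bits() - Compress a chunk of the bitmap using RLE and VLI
 * @mdev:	DRBD device.
 * @p:		Compressed bitmap packet being filled in.
 * @size:	Number of bytes available for the code string.
 * @c:		Transfer context, tracks our progress through the bitmap.
 *
 * Returns the length of the generated code string in bytes, 0 if this
 * chunk did not compress (or RLE is disabled or not supported by the
 * peer), and -1 on error.
 */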
Philipp Reisnerb411b362009-09-25 16:07:19 -07001232int fill_bitmap_rle_bits(struct drbd_conf *mdev,
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001233 struct p_compressed_bm *p,
1234 unsigned int size,
1235 struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001236{
1237 struct bitstream bs;
1238 unsigned long plain_bits;
1239 unsigned long tmp;
1240 unsigned long rl;
1241 unsigned len;
1242 unsigned toggle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001243 int bits, use_rle;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001244
1245 /* may we use this feature? */
Philipp Reisner44ed1672011-04-19 17:10:19 +02001246 rcu_read_lock();
1247 use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
1248 rcu_read_unlock();
1249 if (!use_rle || mdev->tconn->agreed_pro_version < 90)
1250 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001251
1252 if (c->bit_offset >= c->bm_bits)
1253 return 0; /* nothing to do. */
1254
	1255	/* use at most this many bytes */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001256 bitstream_init(&bs, p->code, size, 0);
1257 memset(p->code, 0, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001258 /* plain bits covered in this code string */
1259 plain_bits = 0;
1260
1261 /* p->encoding & 0x80 stores whether the first run length is set.
1262 * bit offset is implicit.
	1263	 * start with toggle == 2 to be able to recognize the first iteration */
1264 toggle = 2;
1265
	1266	/* see how many plain bits we can stuff into one packet
1267 * using RLE and VLI. */
1268 do {
1269 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
1270 : _drbd_bm_find_next(mdev, c->bit_offset);
1271 if (tmp == -1UL)
1272 tmp = c->bm_bits;
1273 rl = tmp - c->bit_offset;
1274
1275 if (toggle == 2) { /* first iteration */
1276 if (rl == 0) {
1277 /* the first checked bit was set,
1278 * store start value, */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001279 dcbp_set_start(p, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001280 /* but skip encoding of zero run length */
1281 toggle = !toggle;
1282 continue;
1283 }
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001284 dcbp_set_start(p, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001285 }
1286
1287 /* paranoia: catch zero runlength.
1288 * can only happen if bitmap is modified while we scan it. */
1289 if (rl == 0) {
1290 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1291 "t:%u bo:%lu\n", toggle, c->bit_offset);
1292 return -1;
1293 }
1294
1295 bits = vli_encode_bits(&bs, rl);
1296 if (bits == -ENOBUFS) /* buffer full */
1297 break;
1298 if (bits <= 0) {
1299 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1300 return 0;
1301 }
1302
1303 toggle = !toggle;
1304 plain_bits += rl;
1305 c->bit_offset = tmp;
1306 } while (c->bit_offset < c->bm_bits);
1307
1308 len = bs.cur.b - p->code + !!bs.cur.bit;
1309
1310 if (plain_bits < (len << 3)) {
1311 /* incompressible with this method.
1312 * we need to rewind both word and bit position. */
1313 c->bit_offset -= plain_bits;
1314 bm_xfer_ctx_bit_to_word_offset(c);
1315 c->bit_offset = c->word_offset * BITS_PER_LONG;
1316 return 0;
1317 }
1318
1319 /* RLE + VLI was able to compress it just fine.
1320 * update c->word_offset. */
1321 bm_xfer_ctx_bit_to_word_offset(c);
1322
1323 /* store pad_bits */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001324 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001325
1326 return len;
1327}
1328
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001329/**
1330 * send_bitmap_rle_or_plain
1331 *
1332 * Return 0 when done, 1 when another iteration is needed, and a negative error
1333 * code upon failure.
1334 */
1335static int
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001336send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001338 struct drbd_socket *sock = &mdev->tconn->data;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001339 unsigned int header_size = drbd_header_size(mdev->tconn);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001340 struct p_compressed_bm *p = sock->sbuf + header_size;
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001341 int len, err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001342
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001343 len = fill_bitmap_rle_bits(mdev, p,
1344 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001345 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001346 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001347
1348 if (len) {
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001349 dcbp_set_code(p, RLE_VLI_Bits);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001350 err = __send_command(mdev->tconn, mdev->vnr, sock,
1351 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1352 NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001353 c->packets[0]++;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001354 c->bytes[0] += header_size + sizeof(*p) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001355
1356 if (c->bit_offset >= c->bm_bits)
1357 len = 0; /* DONE */
1358 } else {
1359 /* was not compressible.
1360 * send a buffer full of plain text bits instead. */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001361 unsigned int data_size;
1362 unsigned long num_words;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001363 unsigned long *p = sock->sbuf + header_size;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001364
1365 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001366 num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001367 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001368 len = num_words * sizeof(*p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001369 if (len)
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001370 drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
1371 err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001372 c->word_offset += num_words;
1373 c->bit_offset = c->word_offset * BITS_PER_LONG;
1374
1375 c->packets[1]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001376 c->bytes[1] += header_size + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001377
1378 if (c->bit_offset > c->bm_bits)
1379 c->bit_offset = c->bm_bits;
1380 }
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001381 if (!err) {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001382 if (len == 0) {
1383 INFO_bm_xfer_stats(mdev, "send", c);
1384 return 0;
1385 } else
1386 return 1;
1387 }
1388 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001389}
1390
1391/* See the comment at receive_bitmap() */
Andreas Gruenbacher058820c2011-03-22 16:03:43 +01001392static int _drbd_send_bitmap(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001393{
1394 struct bm_xfer_ctx c;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001395 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001396
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001397 if (!expect(mdev->bitmap))
1398 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001399
Philipp Reisnerb411b362009-09-25 16:07:19 -07001400 if (get_ldev(mdev)) {
1401 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1402 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
1403 drbd_bm_set_all(mdev);
1404 if (drbd_bm_write(mdev)) {
	1405			/* write_bm did fail! Leave full sync flag set in meta data,
1406 * but otherwise process as per normal - need to tell other
1407 * side that a full resync is required! */
1408 dev_err(DEV, "Failed to write bitmap to disk!\n");
1409 } else {
1410 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
1411 drbd_md_sync(mdev);
1412 }
1413 }
1414 put_ldev(mdev);
1415 }
1416
1417 c = (struct bm_xfer_ctx) {
1418 .bm_bits = drbd_bm_bits(mdev),
1419 .bm_words = drbd_bm_words(mdev),
1420 };
1421
1422 do {
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001423 err = send_bitmap_rle_or_plain(mdev, &c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001424 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001425
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001426 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001427}
1428
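/* Grab the data socket and, if it is established, transfer the whole
 * bitmap.  Returns 0 on success, nonzero otherwise. */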
1429int drbd_send_bitmap(struct drbd_conf *mdev)
1430{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001431 struct drbd_socket *sock = &mdev->tconn->data;
1432 int err = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001434 mutex_lock(&sock->mutex);
1435 if (sock->socket)
1436 err = !_drbd_send_bitmap(mdev);
1437 mutex_unlock(&sock->mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001438 return err;
1439}
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001440
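/**
 * drbd_send_b_ack() - Sends a P_BARRIER_ACK packet
 * @mdev:	DRBD device.
 * @barrier_nr:	Barrier number, needs to be in big endian byte order.
 * @set_size:	Number of writes confirmed by this barrier ack.
 */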
Andreas Gruenbacherd4e67d72011-03-16 01:25:28 +01001441void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001442{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001443 struct drbd_socket *sock;
1444 struct p_barrier_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001445
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001446 if (mdev->state.conn < C_CONNECTED)
1447 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001448
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001449 sock = &mdev->tconn->meta;
1450 p = drbd_prepare_command(mdev, sock);
1451 if (!p)
1452 return;
1453 p->barrier = barrier_nr;
1454 p->set_size = cpu_to_be32(set_size);
1455 drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001456}
1457
1458/**
1459 * _drbd_send_ack() - Sends an ack packet
1460 * @mdev: DRBD device.
1461 * @cmd: Packet command code.
1462 * @sector: sector, needs to be in big endian byte order
	1463	 * @blksize:	size in bytes, needs to be in big endian byte order
1464 * @block_id: Id, big endian byte order
1465 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001466static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1467 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001468{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001469 struct drbd_socket *sock;
1470 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001471
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001472 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001473 return -EIO;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001474
1475 sock = &mdev->tconn->meta;
1476 p = drbd_prepare_command(mdev, sock);
1477 if (!p)
1478 return -EIO;
1479 p->sector = sector;
1480 p->block_id = block_id;
1481 p->blksize = blksize;
1482 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1483 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001484}
1485
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001486/* dp->sector and dp->block_id already/still in network byte order,
1487 * data_size is payload size according to dp->head,
1488 * and may need to be corrected for digest size. */
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001489void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1490 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001491{
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001492 if (mdev->tconn->peer_integrity_tfm)
1493 data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001494 _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1495 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001496}
1497
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001498void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1499 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001500{
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001501 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001502}
1503
1504/**
1505 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001506 * @mdev: DRBD device
1507 * @cmd: packet command code
1508 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001509 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001510int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001511 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512{
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001513 return _drbd_send_ack(mdev, cmd,
1514 cpu_to_be64(peer_req->i.sector),
1515 cpu_to_be32(peer_req->i.size),
1516 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001517}
1518
1519/* This function misuses the block_id field to signal if the blocks
	1520	 * are in sync or not. */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001521int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001522 sector_t sector, int blksize, u64 block_id)
1523{
Andreas Gruenbacherfa79abd2011-03-16 01:31:39 +01001524 return _drbd_send_ack(mdev, cmd,
1525 cpu_to_be64(sector),
1526 cpu_to_be32(blksize),
1527 cpu_to_be64(block_id));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001528}
1529
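/**
 * drbd_send_drequest() - Sends a block request of type @cmd
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	Start sector of the requested block.
 * @size:	Size of the requested block in bytes.
 * @block_id:	Id of the request, echoed back in the peer's ack.
 */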
1530int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1531 sector_t sector, int size, u64 block_id)
1532{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001533 struct drbd_socket *sock;
1534 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001535
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001536 sock = &mdev->tconn->data;
1537 p = drbd_prepare_command(mdev, sock);
1538 if (!p)
1539 return -EIO;
1540 p->sector = cpu_to_be64(sector);
1541 p->block_id = block_id;
1542 p->blksize = cpu_to_be32(size);
1543 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001544}
1545
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001546int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1547 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001548{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001549 struct drbd_socket *sock;
1550 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001551
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001552 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001553
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001554 sock = &mdev->tconn->data;
1555 p = drbd_prepare_command(mdev, sock);
1556 if (!p)
1557 return -EIO;
1558 p->sector = cpu_to_be64(sector);
1559 p->block_id = ID_SYNCER /* unused */;
1560 p->blksize = cpu_to_be32(size);
1561 return drbd_send_command(mdev, sock, cmd, sizeof(*p),
1562 digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001563}
1564
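/**
 * drbd_send_ov_request() - Sends a P_OV_REQUEST (online verify) packet
 * @mdev:	DRBD device.
 * @sector:	Start sector of the block to verify.
 * @size:	Size of the block in bytes.
 */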
1565int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1566{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001567 struct drbd_socket *sock;
1568 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001569
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001570 sock = &mdev->tconn->data;
1571 p = drbd_prepare_command(mdev, sock);
1572 if (!p)
1573 return -EIO;
1574 p->sector = cpu_to_be64(sector);
1575 p->block_id = ID_SYNCER /* unused */;
1576 p->blksize = cpu_to_be32(size);
1577 return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001578}
1579
1580/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001581 * returns false if we should retry,
	1582	 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001583 */
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001584static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001585{
1586 int drop_it;
1587 /* long elapsed = (long)(jiffies - mdev->last_received); */
1588
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001589 drop_it = tconn->meta.socket == sock
1590 || !tconn->asender.task
1591 || get_t_state(&tconn->asender) != RUNNING
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001592 || tconn->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593
1594 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001595 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001596
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001597 drop_it = !--tconn->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001598 if (!drop_it) {
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001599 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1600 current->comm, current->pid, tconn->ko_count);
1601 request_ping(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001602 }
1603
1604 return drop_it; /* && (mdev->state == R_PRIMARY) */;
1605}
1606
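/* Consider the data socket congested once more than 4/5 of its send
 * buffer space is queued but not yet sent. */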
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001607static void drbd_update_congested(struct drbd_tconn *tconn)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001608{
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001609 struct sock *sk = tconn->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001610 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001611 set_bit(NET_CONGESTED, &tconn->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001612}
1613
Philipp Reisnerb411b362009-09-25 16:07:19 -07001614/* The idea of sendpage seems to be to put some kind of reference
1615 * to the page into the skb, and to hand it over to the NIC. In
1616 * this process get_page() gets called.
1617 *
1618 * As soon as the page was really sent over the network put_page()
1619 * gets called by some part of the network layer. [ NIC driver? ]
1620 *
1621 * [ get_page() / put_page() increment/decrement the count. If count
1622 * reaches 0 the page will be freed. ]
1623 *
1624 * This works nicely with pages from FSs.
1625 * But this means that in protocol A we might signal IO completion too early!
1626 *
1627 * In order not to corrupt data during a resync we must make sure
	1628	 * that we do not reuse our own buffer pages (EEs) too early, therefore
1629 * we have the net_ee list.
1630 *
1631 * XFS seems to have problems, still, it submits pages with page_count == 0!
1632 * As a workaround, we disable sendpage on pages
1633 * with page_count == 0 or PageSlab.
1634 */
1635static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001636 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001637{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001638 struct socket *socket;
1639 void *addr;
1640 int err;
1641
1642 socket = mdev->tconn->data.socket;
1643 addr = kmap(page) + offset;
1644 err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001645 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001646 if (!err)
1647 mdev->send_cnt += size >> 9;
1648 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001649}
1650
1651static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001652 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001653{
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001654 struct socket *socket = mdev->tconn->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001655 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001656 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001657 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001658
1659 /* e.g. XFS meta- & log-data is in slab pages, which have a
1660 * page_count of 0 and/or have PageSlab() set.
1661 * we cannot use send_page for those, as that does get_page();
1662 * put_page(); and would cause either a VM_BUG directly, or
1663 * __page_cache_release a page that would actually still be referenced
1664 * by someone, leading to some obscure delayed Oops somewhere else. */
1665 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001666 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001667
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001668 msg_flags |= MSG_NOSIGNAL;
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001669 drbd_update_congested(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001670 set_fs(KERNEL_DS);
1671 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001672 int sent;
1673
1674 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001675 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001676 if (sent == -EAGAIN) {
1677 if (we_should_drop_the_connection(mdev->tconn, socket))
1678 break;
1679 continue;
1680 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001681 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1682 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001683 if (sent < 0)
1684 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001685 break;
1686 }
1687 len -= sent;
1688 offset += sent;
1689 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
1690 set_fs(oldfs);
Philipp Reisner01a311a2011-02-07 14:30:33 +01001691 clear_bit(NET_CONGESTED, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001692
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001693 if (len == 0) {
1694 err = 0;
1695 mdev->send_cnt += size >> 9;
1696 }
1697 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001698}
1699
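/* Send the payload of a bio.  _drbd_send_bio() copies each page through
 * the socket buffer; _drbd_send_zc_bio() below uses zero-copy
 * _drbd_send_page() instead. */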
1700static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1701{
1702 struct bio_vec *bvec;
1703 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001704 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001705 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001706 int err;
1707
1708 err = _drbd_no_send_page(mdev, bvec->bv_page,
1709 bvec->bv_offset, bvec->bv_len,
1710 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1711 if (err)
1712 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001713 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001714 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001715}
1716
1717static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1718{
1719 struct bio_vec *bvec;
1720 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001721 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001722 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001723 int err;
1724
1725 err = _drbd_send_page(mdev, bvec->bv_page,
1726 bvec->bv_offset, bvec->bv_len,
1727 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1728 if (err)
1729 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001730 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001731 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001732}
1733
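/* Send the page chain of a peer request, using zero-copy
 * _drbd_send_page() where possible. */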
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001734static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1735 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001736{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001737 struct page *page = peer_req->pages;
1738 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001739 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001740
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001741 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001742 page_chain_for_each(page) {
1743 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001744
1745 err = _drbd_send_page(mdev, page, 0, l,
1746 page_chain_next(page) ? MSG_MORE : 0);
1747 if (err)
1748 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001749 len -= l;
1750 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001751 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001752}
1753
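/* Translate bio flags (REQ_*) into the DP_* flags sent on the wire.
 * Peers before protocol version 95 only understand DP_RW_SYNC. */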
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001754static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1755{
Philipp Reisner31890f42011-01-19 14:12:51 +01001756 if (mdev->tconn->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001757 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001758 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1759 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1760 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1761 else
Jens Axboe721a9602011-03-09 11:56:30 +01001762 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001763}
1764
Philipp Reisnerb411b362009-09-25 16:07:19 -07001765/* Used to send write requests
1766 * R_PRIMARY -> Peer (P_DATA)
1767 */
1768int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1769{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001770 struct drbd_socket *sock;
1771 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001772 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001773 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001774 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001775
Philipp Reisner46e1ce42011-05-16 12:57:15 +02001776 sock = &mdev->tconn->data;
1777 p = drbd_prepare_command(mdev, sock);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001778 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
1779 crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001780
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001781 if (!p)
1782 return -EIO;
1783 p->sector = cpu_to_be64(req->i.sector);
1784 p->block_id = (unsigned long)req;
1785 p->seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001786 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001787 if (mdev->state.conn >= C_SYNC_SOURCE &&
1788 mdev->state.conn <= C_PAUSED_SYNC_T)
1789 dp_flags |= DP_MAY_SET_IN_SYNC;
Philipp Reisner303d1442011-04-13 16:24:47 -07001790 if (mdev->tconn->agreed_pro_version >= 100) {
1791 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1792 dp_flags |= DP_SEND_RECEIVE_ACK;
1793 if (req->rq_state & RQ_EXP_WRITE_ACK)
1794 dp_flags |= DP_SEND_WRITE_ACK;
1795 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001796 p->dp_flags = cpu_to_be32(dp_flags);
1797 if (dgs)
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001798 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001799 err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001800 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001801 /* For protocol A, we have to memcpy the payload into
1802 * socket buffers, as we may complete right away
1803 * as soon as we handed it over to tcp, at which point the data
1804 * pages may become invalid.
1805 *
1806 * For data-integrity enabled, we copy it as well, so we can be
1807 * sure that even if the bio pages may still be modified, it
1808 * won't change the data on the wire, thus if the digest checks
1809 * out ok after sending on this side, but does not fit on the
1810 * receiving side, we sure have detected corruption elsewhere.
1811 */
Philipp Reisner303d1442011-04-13 16:24:47 -07001812 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001813 err = _drbd_send_bio(mdev, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001814 else
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001815 err = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001816
1817 /* double check digest, sometimes buffers have been modified in flight. */
1818 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001819 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001820 * currently supported in kernel crypto. */
1821 unsigned char digest[64];
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001822 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001823 if (memcmp(p + 1, digest, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001824 dev_warn(DEV,
1825 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001826 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001827 }
1828 } /* else if (dgs > 64) {
1829 ... Be noisy about digest too large ...
1830 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001831 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001832 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001833
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001834 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001835}
1836
1837/* answer packet, used to send data back for read requests:
1838 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1839 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1840 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001841int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001842 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001843{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001844 struct drbd_socket *sock;
1845 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001846 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001847 int dgs;
1848
Philipp Reisner46e1ce42011-05-16 12:57:15 +02001849 sock = &mdev->tconn->data;
1850 p = drbd_prepare_command(mdev, sock);
1851
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001852 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
1853 crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001854
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001855 if (!p)
1856 return -EIO;
1857 p->sector = cpu_to_be64(peer_req->i.sector);
1858 p->block_id = peer_req->block_id;
1859 p->seq_num = 0; /* unused */
1860 if (dgs)
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001861 drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001862 err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001863 if (!err)
1864 err = _drbd_send_zc_ee(mdev, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001865 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001866
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001867 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001868}
1869
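/**
 * drbd_send_out_of_sync() - Tells the peer that a block is out of sync
 * @mdev:	DRBD device.
 * @req:	Request describing the out-of-sync block.
 */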
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01001870int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001871{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001872 struct drbd_socket *sock;
1873 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001874
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001875 sock = &mdev->tconn->data;
1876 p = drbd_prepare_command(mdev, sock);
1877 if (!p)
1878 return -EIO;
1879 p->sector = cpu_to_be64(req->i.sector);
1880 p->blksize = cpu_to_be32(req->i.size);
1881 return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001882}
1883
Philipp Reisnerb411b362009-09-25 16:07:19 -07001884/*
1885 drbd_send distinguishes two cases:
1886
1887 Packets sent via the data socket "sock"
1888 and packets sent via the meta data socket "msock"
1889
1890 sock msock
1891 -----------------+-------------------------+------------------------------
1892 timeout conf.timeout / 2 conf.timeout / 2
1893 timeout action send a ping via msock Abort communication
1894 and close all sockets
1895*/
1896
1897/*
1898 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1899 */
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001900int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001901 void *buf, size_t size, unsigned msg_flags)
1902{
1903 struct kvec iov;
1904 struct msghdr msg;
1905 int rv, sent = 0;
1906
1907 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001908 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001909
1910 /* THINK if (signal_pending) return ... ? */
1911
1912 iov.iov_base = buf;
1913 iov.iov_len = size;
1914
1915 msg.msg_name = NULL;
1916 msg.msg_namelen = 0;
1917 msg.msg_control = NULL;
1918 msg.msg_controllen = 0;
1919 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1920
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001921 if (sock == tconn->data.socket) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001922 rcu_read_lock();
1923 tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
1924 rcu_read_unlock();
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001925 drbd_update_congested(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001926 }
1927 do {
1928 /* STRANGE
	1929	 * tcp_sendmsg does _not_ use its size parameter at all?
1930 *
1931 * -EAGAIN on timeout, -EINTR on signal.
1932 */
1933/* THINK
1934 * do we need to block DRBD_SIG if sock == &meta.socket ??
1935 * otherwise wake_asender() might interrupt some send_*Ack !
1936 */
1937 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1938 if (rv == -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001939 if (we_should_drop_the_connection(tconn, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001940 break;
1941 else
1942 continue;
1943 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001944 if (rv == -EINTR) {
1945 flush_signals(current);
1946 rv = 0;
1947 }
1948 if (rv < 0)
1949 break;
1950 sent += rv;
1951 iov.iov_base += rv;
1952 iov.iov_len -= rv;
1953 } while (sent < size);
1954
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001955 if (sock == tconn->data.socket)
1956 clear_bit(NET_CONGESTED, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001957
1958 if (rv <= 0) {
1959 if (rv != -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001960 conn_err(tconn, "%s_sendmsg returned %d\n",
1961 sock == tconn->meta.socket ? "msock" : "sock",
1962 rv);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001963 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001964 } else
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001965 conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001966 }
1967
1968 return sent;
1969}
1970
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001971/**
1972 * drbd_send_all - Send an entire buffer
1973 *
1974 * Returns 0 upon success and a negative error value otherwise.
1975 */
1976int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1977 size_t size, unsigned msg_flags)
1978{
1979 int err;
1980
1981 err = drbd_send(tconn, sock, buffer, size, msg_flags);
1982 if (err < 0)
1983 return err;
1984 if (err != size)
1985 return -EIO;
1986 return 0;
1987}
1988
Philipp Reisnerb411b362009-09-25 16:07:19 -07001989static int drbd_open(struct block_device *bdev, fmode_t mode)
1990{
1991 struct drbd_conf *mdev = bdev->bd_disk->private_data;
1992 unsigned long flags;
1993 int rv = 0;
1994
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001995 mutex_lock(&drbd_main_mutex);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001996 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001997 /* to have a stable mdev->state.role
1998 * and no race with updating open_cnt */
1999
2000 if (mdev->state.role != R_PRIMARY) {
2001 if (mode & FMODE_WRITE)
2002 rv = -EROFS;
2003 else if (!allow_oos)
2004 rv = -EMEDIUMTYPE;
2005 }
2006
2007 if (!rv)
2008 mdev->open_cnt++;
Philipp Reisner87eeee42011-01-19 14:16:30 +01002009 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002010 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002011
2012 return rv;
2013}
2014
2015static int drbd_release(struct gendisk *gd, fmode_t mode)
2016{
2017 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002018 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002019 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002020 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002021 return 0;
2022}
2023
Philipp Reisnerb411b362009-09-25 16:07:19 -07002024static void drbd_set_defaults(struct drbd_conf *mdev)
2025{
Lars Ellenbergf3990022011-03-23 14:31:09 +01002026 /* Beware! The actual layout differs
2027 * between big endian and little endian */
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002028 mdev->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002029 { .role = R_SECONDARY,
2030 .peer = R_UNKNOWN,
2031 .conn = C_STANDALONE,
2032 .disk = D_DISKLESS,
2033 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07002034 } };
2035}
2036
2037void drbd_init_set_defaults(struct drbd_conf *mdev)
2038{
2039 /* the memset(,0,) did most of this.
2040 * note: only assignments, no allocation in here */
2041
2042 drbd_set_defaults(mdev);
2043
Philipp Reisnerb411b362009-09-25 16:07:19 -07002044 atomic_set(&mdev->ap_bio_cnt, 0);
2045 atomic_set(&mdev->ap_pending_cnt, 0);
2046 atomic_set(&mdev->rs_pending_cnt, 0);
2047 atomic_set(&mdev->unacked_cnt, 0);
2048 atomic_set(&mdev->local_cnt, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02002049 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02002050 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002051 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02002052 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnercdfda632011-07-05 15:38:59 +02002053 atomic_set(&mdev->md_io_in_use, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002054
Philipp Reisner8410da8f02011-02-11 20:11:10 +01002055 mutex_init(&mdev->own_state_mutex);
2056 mdev->state_mutex = &mdev->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002057
Philipp Reisnerb411b362009-09-25 16:07:19 -07002058 spin_lock_init(&mdev->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002059 spin_lock_init(&mdev->peer_seq_lock);
2060 spin_lock_init(&mdev->epoch_lock);
2061
2062 INIT_LIST_HEAD(&mdev->active_ee);
2063 INIT_LIST_HEAD(&mdev->sync_ee);
2064 INIT_LIST_HEAD(&mdev->done_ee);
2065 INIT_LIST_HEAD(&mdev->read_ee);
2066 INIT_LIST_HEAD(&mdev->net_ee);
2067 INIT_LIST_HEAD(&mdev->resync_reads);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002068 INIT_LIST_HEAD(&mdev->resync_work.list);
2069 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002070 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002071 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02002072 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002073 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02002074
Philipp Reisner794abb72010-12-27 11:51:23 +01002075 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002076 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002077 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002078 mdev->md_sync_work.cb = w_md_sync;
2079 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01002080 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01002081
2082 mdev->resync_work.mdev = mdev;
2083 mdev->unplug_work.mdev = mdev;
2084 mdev->go_diskless.mdev = mdev;
2085 mdev->md_sync_work.mdev = mdev;
2086 mdev->bm_io_work.w.mdev = mdev;
2087 mdev->start_resync_work.mdev = mdev;
2088
Philipp Reisnerb411b362009-09-25 16:07:19 -07002089 init_timer(&mdev->resync_timer);
2090 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01002091 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01002092 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093 mdev->resync_timer.function = resync_timer_fn;
2094 mdev->resync_timer.data = (unsigned long) mdev;
2095 mdev->md_sync_timer.function = md_sync_timer_fn;
2096 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01002097 mdev->start_resync_timer.function = start_resync_timer_fn;
2098 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01002099 mdev->request_timer.function = request_timer_fn;
2100 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002101
2102 init_waitqueue_head(&mdev->misc_wait);
2103 init_waitqueue_head(&mdev->state_wait);
2104 init_waitqueue_head(&mdev->ee_wait);
2105 init_waitqueue_head(&mdev->al_wait);
2106 init_waitqueue_head(&mdev->seq_wait);
2107
Philipp Reisner2451fc32010-08-24 13:43:11 +02002108 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002109 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002110 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2111 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002112}
2113
2114void drbd_mdev_cleanup(struct drbd_conf *mdev)
2115{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02002116 int i;
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01002117 if (mdev->tconn->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002118 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01002119 mdev->tconn->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002120
2121 /* no need to lock it, I'm the only thread alive */
2122 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
2123 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2124 mdev->al_writ_cnt =
2125 mdev->bm_writ_cnt =
2126 mdev->read_cnt =
2127 mdev->recv_cnt =
2128 mdev->send_cnt =
2129 mdev->writ_cnt =
2130 mdev->p_size =
2131 mdev->rs_start =
2132 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02002133 mdev->rs_failed = 0;
2134 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002135 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02002136 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2137 mdev->rs_mark_left[i] = 0;
2138 mdev->rs_mark_time[i] = 0;
2139 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01002140 D_ASSERT(mdev->tconn->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002141
2142 drbd_set_my_capacity(mdev, 0);
2143 if (mdev->bitmap) {
2144 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01002145 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002146 drbd_bm_cleanup(mdev);
2147 }
2148
Philipp Reisner1d041222011-04-22 15:20:23 +02002149 drbd_free_bc(mdev->ldev);
2150 mdev->ldev = NULL;
2151
Philipp Reisner07782862010-08-31 12:00:50 +02002152 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002153
Philipp Reisnerb411b362009-09-25 16:07:19 -07002154 D_ASSERT(list_empty(&mdev->active_ee));
2155 D_ASSERT(list_empty(&mdev->sync_ee));
2156 D_ASSERT(list_empty(&mdev->done_ee));
2157 D_ASSERT(list_empty(&mdev->read_ee));
2158 D_ASSERT(list_empty(&mdev->net_ee));
2159 D_ASSERT(list_empty(&mdev->resync_reads));
Philipp Reisnere42325a2011-01-19 13:55:45 +01002160 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2161 D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002162 D_ASSERT(list_empty(&mdev->resync_work.list));
2163 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002164 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01002165
2166 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002167}
2168
2169
2170static void drbd_destroy_mempools(void)
2171{
2172 struct page *page;
2173
2174 while (drbd_pp_pool) {
2175 page = drbd_pp_pool;
2176 drbd_pp_pool = (struct page *)page_private(page);
2177 __free_page(page);
2178 drbd_pp_vacant--;
2179 }
2180
2181 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2182
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002183 if (drbd_md_io_bio_set)
2184 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg35abf592011-02-23 12:39:46 +01002185 if (drbd_md_io_page_pool)
2186 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002187 if (drbd_ee_mempool)
2188 mempool_destroy(drbd_ee_mempool);
2189 if (drbd_request_mempool)
2190 mempool_destroy(drbd_request_mempool);
2191 if (drbd_ee_cache)
2192 kmem_cache_destroy(drbd_ee_cache);
2193 if (drbd_request_cache)
2194 kmem_cache_destroy(drbd_request_cache);
2195 if (drbd_bm_ext_cache)
2196 kmem_cache_destroy(drbd_bm_ext_cache);
2197 if (drbd_al_ext_cache)
2198 kmem_cache_destroy(drbd_al_ext_cache);
2199
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002200 drbd_md_io_bio_set = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002201 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002202 drbd_ee_mempool = NULL;
2203 drbd_request_mempool = NULL;
2204 drbd_ee_cache = NULL;
2205 drbd_request_cache = NULL;
2206 drbd_bm_ext_cache = NULL;
2207 drbd_al_ext_cache = NULL;
2208
2209 return;
2210}
2211
2212static int drbd_create_mempools(void)
2213{
2214 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002215 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002216 int i;
2217
2218 /* prepare our caches and mempools */
2219 drbd_request_mempool = NULL;
2220 drbd_ee_cache = NULL;
2221 drbd_request_cache = NULL;
2222 drbd_bm_ext_cache = NULL;
2223 drbd_al_ext_cache = NULL;
2224 drbd_pp_pool = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002225 drbd_md_io_page_pool = NULL;
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002226 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002227
2228 /* caches */
2229 drbd_request_cache = kmem_cache_create(
2230 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2231 if (drbd_request_cache == NULL)
2232 goto Enomem;
2233
2234 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002235 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002236 if (drbd_ee_cache == NULL)
2237 goto Enomem;
2238
2239 drbd_bm_ext_cache = kmem_cache_create(
2240 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2241 if (drbd_bm_ext_cache == NULL)
2242 goto Enomem;
2243
2244 drbd_al_ext_cache = kmem_cache_create(
2245 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2246 if (drbd_al_ext_cache == NULL)
2247 goto Enomem;
2248
2249 /* mempools */
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002250 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2251 if (drbd_md_io_bio_set == NULL)
2252 goto Enomem;
2253
Lars Ellenberg35abf592011-02-23 12:39:46 +01002254 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2255 if (drbd_md_io_page_pool == NULL)
2256 goto Enomem;
2257
Philipp Reisnerb411b362009-09-25 16:07:19 -07002258 drbd_request_mempool = mempool_create(number,
2259 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2260 if (drbd_request_mempool == NULL)
2261 goto Enomem;
2262
2263 drbd_ee_mempool = mempool_create(number,
2264 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002265 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002266 goto Enomem;
2267
2268 /* drbd's page pool */
2269 spin_lock_init(&drbd_pp_lock);
2270
2271 for (i = 0; i < number; i++) {
2272 page = alloc_page(GFP_HIGHUSER);
2273 if (!page)
2274 goto Enomem;
2275 set_page_private(page, (unsigned long)drbd_pp_pool);
2276 drbd_pp_pool = page;
2277 }
2278 drbd_pp_vacant = number;
2279
2280 return 0;
2281
2282Enomem:
2283 drbd_destroy_mempools(); /* in case we allocated some */
2284 return -ENOMEM;
2285}
2286
2287static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2288 void *unused)
2289{
2290 /* just so we have it. you never know what interesting things we
2291 * might want to do here some day...
2292 */
2293
2294 return NOTIFY_DONE;
2295}
2296
2297static struct notifier_block drbd_notifier = {
2298 .notifier_call = drbd_notify_sys,
2299};
2300
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002301static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002302{
2303 int rr;
2304
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002305 rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002306 if (rr)
2307 dev_err(DEV, "%d EEs in active list found!\n", rr);
2308
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002309 rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002310 if (rr)
2311 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2312
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002313 rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002314 if (rr)
2315 dev_err(DEV, "%d EEs in read list found!\n", rr);
2316
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002317 rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002318 if (rr)
2319 dev_err(DEV, "%d EEs in done list found!\n", rr);
2320
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002321 rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002322 if (rr)
2323 dev_err(DEV, "%d EEs in net list found!\n", rr);
2324}
2325
Philipp Reisner774b3052011-02-22 02:07:03 -05002326/* caution. no locking. */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002327void drbd_minor_destroy(struct kref *kref)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002328{
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002329 struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002330 struct drbd_tconn *tconn = mdev->tconn;
2331
Philipp Reisnercdfda632011-07-05 15:38:59 +02002332 del_timer_sync(&mdev->request_timer);
2333
Philipp Reisnerb411b362009-09-25 16:07:19 -07002334 /* paranoia asserts */
Andreas Gruenbacher70dc65e2010-12-21 14:46:57 +01002335 D_ASSERT(mdev->open_cnt == 0);
Philipp Reisnere42325a2011-01-19 13:55:45 +01002336 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002337 /* end paranoia asserts */
2338
Philipp Reisnerb411b362009-09-25 16:07:19 -07002339 /* cleanup stuff that may have been allocated during
2340 * device (re-)configuration or state changes */
2341
2342 if (mdev->this_bdev)
2343 bdput(mdev->this_bdev);
2344
Philipp Reisner1d041222011-04-22 15:20:23 +02002345 drbd_free_bc(mdev->ldev);
2346 mdev->ldev = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002347
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002348 drbd_release_all_peer_reqs(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002349
Philipp Reisnerb411b362009-09-25 16:07:19 -07002350 lc_destroy(mdev->act_log);
2351 lc_destroy(mdev->resync);
2352
2353 kfree(mdev->p_uuid);
2354 /* mdev->p_uuid = NULL; */
2355
Philipp Reisnercd1d9952011-04-11 21:24:24 -07002356 kfree(mdev->current_epoch);
2357 if (mdev->bitmap) /* should no longer be there. */
2358 drbd_bm_cleanup(mdev);
2359 __free_page(mdev->md_io_page);
2360 put_disk(mdev->vdisk);
2361 blk_cleanup_queue(mdev->rq_queue);
Philipp Reisner9958c852011-05-03 16:19:31 +02002362 kfree(mdev->rs_plan_s);
Philipp Reisnercd1d9952011-04-11 21:24:24 -07002363 kfree(mdev);
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002364
2365 kref_put(&tconn->kref, &conn_destroy);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002366}
2367
2368static void drbd_cleanup(void)
2369{
2370 unsigned int i;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002371 struct drbd_conf *mdev;
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002372 struct drbd_tconn *tconn, *tmp;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002373
2374 unregister_reboot_notifier(&drbd_notifier);
2375
Lars Ellenberg17a93f32010-11-24 10:37:35 +01002376 /* first remove proc,
	2377	 * drbdsetup uses its presence to detect
2378 * whether DRBD is loaded.
	2379	 * If we were to get stuck in proc removal,
2380 * but have netlink already deregistered,
2381 * some drbdsetup commands may wait forever
2382 * for an answer.
2383 */
2384 if (drbd_proc)
2385 remove_proc_entry("drbd", NULL);
2386
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002387 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002388
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002389 idr_for_each_entry(&minors, mdev, i) {
2390 idr_remove(&minors, mdev_to_minor(mdev));
2391 idr_remove(&mdev->tconn->volumes, mdev->vnr);
2392 del_gendisk(mdev->vdisk);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002393 /* synchronize_rcu(); No other threads running at this point */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002394 kref_put(&mdev->kref, &drbd_minor_destroy);
2395 }
2396
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002397 /* not _rcu since there is no other updater anymore. Genl already unregistered */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002398 list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002399 list_del(&tconn->all_tconn); /* not _rcu: no proc, no other threads */
2400 /* synchronize_rcu(); */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002401 kref_put(&tconn->kref, &conn_destroy);
2402 }
Philipp Reisnerff370e52011-04-11 21:10:11 -07002403
Philipp Reisner81a5d602011-02-22 19:53:16 -05002404 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002405 unregister_blkdev(DRBD_MAJOR, "drbd");
2406
Philipp Reisner81a5d602011-02-22 19:53:16 -05002407 idr_destroy(&minors);
2408
Philipp Reisnerb411b362009-09-25 16:07:19 -07002409 printk(KERN_INFO "drbd: module cleanup done.\n");
2410}
2411
2412/**
2413 * drbd_congested() - Callback for pdflush
2414 * @congested_data: User data
2415 * @bdi_bits: Bits pdflush is currently interested in
2416 *
2417 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2418 */
2419static int drbd_congested(void *congested_data, int bdi_bits)
2420{
2421 struct drbd_conf *mdev = congested_data;
2422 struct request_queue *q;
2423 char reason = '-';
2424 int r = 0;
2425
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01002426 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002427 /* DRBD has frozen IO */
2428 r = bdi_bits;
2429 reason = 'd';
2430 goto out;
2431 }
2432
2433 if (get_ldev(mdev)) {
2434 q = bdev_get_queue(mdev->ldev->backing_bdev);
2435 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2436 put_ldev(mdev);
2437 if (r)
2438 reason = 'b';
2439 }
2440
Philipp Reisner01a311a2011-02-07 14:30:33 +01002441 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002442 r |= (1 << BDI_async_congested);
2443 reason = reason == 'b' ? 'a' : 'n';
2444 }
2445
2446out:
2447 mdev->congestion_reason = reason;
2448 return r;
2449}
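
/* Illustrative sketch only (hypothetical helper, not part of the driver):
 * decoding the return value documented above. BDI_sync_congested is
 * assumed available from the same enum as BDI_async_congested. */
static void __maybe_unused example_check_congestion(struct drbd_conf *mdev)
{
	int bits = drbd_congested(mdev, (1 << BDI_async_congested) |
					(1 << BDI_sync_congested));

	if (bits & (1 << BDI_async_congested))
		dev_info(DEV, "async writeback congested (reason '%c')\n",
			 mdev->congestion_reason);
}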
2450
Philipp Reisner6699b652011-02-09 11:10:24 +01002451static void drbd_init_workqueue(struct drbd_work_queue* wq)
2452{
2453 sema_init(&wq->s, 0);
2454 spin_lock_init(&wq->q_lock);
2455 INIT_LIST_HEAD(&wq->q);
2456}
2457
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002458struct drbd_tconn *conn_get_by_name(const char *name)
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002459{
2460 struct drbd_tconn *tconn;
2461
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002462 if (!name || !name[0])
2463 return NULL;
2464
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002465 rcu_read_lock();
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002466 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002467 if (!strcmp(tconn->name, name)) {
2468 kref_get(&tconn->kref);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002469 goto found;
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002470 }
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002471 }
2472 tconn = NULL;
2473found:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002474 rcu_read_unlock();
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002475 return tconn;
2476}
2477
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002478struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
2479 void *peer_addr, int peer_addr_len)
2480{
2481 struct drbd_tconn *tconn;
2482
2483 rcu_read_lock();
2484 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
2485 if (tconn->my_addr_len == my_addr_len &&
2486 tconn->peer_addr_len == peer_addr_len &&
2487 !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
2488 !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
2489 kref_get(&tconn->kref);
2490 goto found;
2491 }
2492 }
2493 tconn = NULL;
2494found:
2495 rcu_read_unlock();
2496 return tconn;
2497}
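
/* Illustrative sketch only (hypothetical helper): both lookup functions
 * above take an extra reference on the tconn (kref_get() under
 * rcu_read_lock()), so every successful lookup must be balanced by a
 * kref_put(). */
static bool __maybe_unused example_conn_exists(const char *name)
{
	struct drbd_tconn *tconn = conn_get_by_name(name);

	if (!tconn)
		return false;
	/* ... inspect tconn while holding the lookup reference ... */
	kref_put(&tconn->kref, &conn_destroy);	/* drop the lookup reference */
	return true;
}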
2498
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002499static int drbd_alloc_socket(struct drbd_socket *socket)
2500{
2501 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2502 if (!socket->rbuf)
2503 return -ENOMEM;
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002504 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2505 if (!socket->sbuf)
2506 return -ENOMEM;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002507 return 0;
2508}
2509
2510static void drbd_free_socket(struct drbd_socket *socket)
2511{
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002512 free_page((unsigned long) socket->sbuf);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002513 free_page((unsigned long) socket->rbuf);
2514}
2515
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002516void conn_free_crypto(struct drbd_tconn *tconn)
2517{
Philipp Reisner1d041222011-04-22 15:20:23 +02002518 drbd_free_sock(tconn);
2519
2520 crypto_free_hash(tconn->csums_tfm);
2521 crypto_free_hash(tconn->verify_tfm);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002522 crypto_free_hash(tconn->cram_hmac_tfm);
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002523 crypto_free_hash(tconn->integrity_tfm);
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02002524 crypto_free_hash(tconn->peer_integrity_tfm);
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002525 kfree(tconn->int_dig_in);
2526 kfree(tconn->int_dig_vv);
Philipp Reisner1d041222011-04-22 15:20:23 +02002527
2528 tconn->csums_tfm = NULL;
2529 tconn->verify_tfm = NULL;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002530 tconn->cram_hmac_tfm = NULL;
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02002531 tconn->integrity_tfm = NULL;
Andreas Gruenbacher5b614ab2011-04-27 21:00:12 +02002532 tconn->peer_integrity_tfm = NULL;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002533 tconn->int_dig_in = NULL;
2534 tconn->int_dig_vv = NULL;
2535}
2536
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002537int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
2538{
2539 cpumask_var_t new_cpu_mask;
2540 int err;
2541
2542 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2543 return -ENOMEM;
2544 /*
2545 retcode = ERR_NOMEM;
2546 drbd_msg_put_info("unable to allocate cpumask");
2547 */
2548
2549 /* silently ignore cpu mask on UP kernel */
2550 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2551 /* FIXME: Get rid of constant 32 here */
2552 err = __bitmap_parse(res_opts->cpu_mask, 32, 0,
2553 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2554 if (err) {
2555 conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
2556 /* retcode = ERR_CPU_MASK_PARSE; */
2557 goto fail;
2558 }
2559 }
2560 tconn->res_opts = *res_opts;
2561 if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2562 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2563 drbd_calc_cpu_mask(tconn);
2564 tconn->receiver.reset_cpu_mask = 1;
2565 tconn->asender.reset_cpu_mask = 1;
2566 tconn->worker.reset_cpu_mask = 1;
2567 }
2568 err = 0;
2569
2570fail:
2571 free_cpumask_var(new_cpu_mask);
2572 return err;
2573
2574}
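
/* Illustrative sketch only (hypothetical caller): res_opts->cpu_mask is
 * a hex bitmap string parsed by __bitmap_parse() above; "3" would pin
 * the resource threads to CPUs 0 and 1. That cpu_mask is a plain char
 * array is an assumption inferred from its use above. */
static int __maybe_unused example_pin_cpus(struct drbd_tconn *tconn)
{
	struct res_opts opts = tconn->res_opts;	/* start from current options */

	strcpy(opts.cpu_mask, "3");		/* hex bitmap: CPUs 0 and 1 */
	return set_resource_options(tconn, &opts);	/* 0 on success */
}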
2575
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002576/* caller must be under genl_lock() */
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002577struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
Philipp Reisner21114382011-01-19 12:26:59 +01002578{
2579 struct drbd_tconn *tconn;
2580
2581 tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
2582 if (!tconn)
2583 return NULL;
2584
2585 tconn->name = kstrdup(name, GFP_KERNEL);
2586 if (!tconn->name)
2587 goto fail;
2588
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002589 if (drbd_alloc_socket(&tconn->data))
2590 goto fail;
2591 if (drbd_alloc_socket(&tconn->meta))
2592 goto fail;
2593
Philipp Reisner774b3052011-02-22 02:07:03 -05002594 if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
2595 goto fail;
2596
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002597 if (set_resource_options(tconn, res_opts))
2598 goto fail;
2599
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002600 if (!tl_init(tconn))
2601 goto fail;
2602
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01002603 tconn->cstate = C_STANDALONE;
Philipp Reisner8410da8f02011-02-11 20:11:10 +01002604 mutex_init(&tconn->cstate_mutex);
Philipp Reisner6699b652011-02-09 11:10:24 +01002605 spin_lock_init(&tconn->req_lock);
Philipp Reisnera0095502011-05-03 13:14:15 +02002606 mutex_init(&tconn->conf_update);
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01002607 init_waitqueue_head(&tconn->ping_wait);
Philipp Reisner062e8792011-02-08 11:09:18 +01002608 idr_init(&tconn->volumes);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002609
Philipp Reisner6699b652011-02-09 11:10:24 +01002610 drbd_init_workqueue(&tconn->data.work);
2611 mutex_init(&tconn->data.mutex);
2612
2613 drbd_init_workqueue(&tconn->meta.work);
2614 mutex_init(&tconn->meta.mutex);
2615
Philipp Reisner392c8802011-02-09 10:33:31 +01002616 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
2617 drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
2618 drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
2619
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002620 kref_init(&tconn->kref);
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002621 list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
Philipp Reisner21114382011-01-19 12:26:59 +01002622
2623 return tconn;
2624
2625fail:
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002626 tl_cleanup(tconn);
Philipp Reisner774b3052011-02-22 02:07:03 -05002627 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002628 drbd_free_socket(&tconn->meta);
2629 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002630 kfree(tconn->name);
2631 kfree(tconn);
2632
2633 return NULL;
2634}
2635
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002636void conn_destroy(struct kref *kref)
Philipp Reisner21114382011-01-19 12:26:59 +01002637{
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002638 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
2639
Philipp Reisner062e8792011-02-08 11:09:18 +01002640 idr_destroy(&tconn->volumes);
Philipp Reisner21114382011-01-19 12:26:59 +01002641
Philipp Reisner774b3052011-02-22 02:07:03 -05002642 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002643 drbd_free_socket(&tconn->meta);
2644 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002645 kfree(tconn->name);
Philipp Reisnerb42a70a2011-01-27 10:55:20 +01002646 kfree(tconn->int_dig_in);
2647 kfree(tconn->int_dig_vv);
Philipp Reisner21114382011-01-19 12:26:59 +01002648 kfree(tconn);
2649}
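
/* Illustrative sketch only: conn_create() returns with one reference
 * held and the tconn already on the drbd_tconns list; conn_destroy()
 * runs when the last reference is dropped, after the list entry has
 * been removed (see drbd_cleanup() above). Any other user brackets its
 * accesses like this hypothetical helper does: */
static void __maybe_unused example_conn_ref(struct drbd_tconn *tconn)
{
	kref_get(&tconn->kref);			/* pin the connection */
	/* ... access tconn members ... */
	kref_put(&tconn->kref, &conn_destroy);	/* conn_destroy() on last put */
}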
2650
Philipp Reisner774b3052011-02-22 02:07:03 -05002651enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002652{
2653 struct drbd_conf *mdev;
2654 struct gendisk *disk;
2655 struct request_queue *q;
Philipp Reisner774b3052011-02-22 02:07:03 -05002656 int vnr_got = vnr;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002657 int minor_got = minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002658 enum drbd_ret_code err = ERR_NOMEM;
Philipp Reisner774b3052011-02-22 02:07:03 -05002659
2660 mdev = minor_to_mdev(minor);
2661 if (mdev)
2662 return ERR_MINOR_EXISTS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002663
2664 /* GFP_KERNEL, we are outside of all write-out paths */
2665 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2666 if (!mdev)
Philipp Reisner774b3052011-02-22 02:07:03 -05002667 return ERR_NOMEM;
2668
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002669 kref_get(&tconn->kref);
Philipp Reisner774b3052011-02-22 02:07:03 -05002670 mdev->tconn = tconn;
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002671
Philipp Reisnerb411b362009-09-25 16:07:19 -07002672 mdev->minor = minor;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002673 mdev->vnr = vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002674
2675 drbd_init_set_defaults(mdev);
2676
2677 q = blk_alloc_queue(GFP_KERNEL);
2678 if (!q)
2679 goto out_no_q;
2680 mdev->rq_queue = q;
2681 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002682
2683 disk = alloc_disk(1);
2684 if (!disk)
2685 goto out_no_disk;
2686 mdev->vdisk = disk;
2687
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002688 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002689
2690 disk->queue = q;
2691 disk->major = DRBD_MAJOR;
2692 disk->first_minor = minor;
2693 disk->fops = &drbd_ops;
2694 sprintf(disk->disk_name, "drbd%d", minor);
2695 disk->private_data = mdev;
2696
2697 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2698 /* we have no partitions. we contain only ourselves. */
2699 mdev->this_bdev->bd_contains = mdev->this_bdev;
2700
2701 q->backing_dev_info.congested_fn = drbd_congested;
2702 q->backing_dev_info.congested_data = mdev;
2703
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01002704 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002705 /* Setting max_hw_sectors to an odd value of 8 KiB here;
2706 this triggers a max_bio_size message upon first attach or connect. */
2707 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002708 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2709 blk_queue_merge_bvec(q, drbd_merge_bvec);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002710 q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002711
2712 mdev->md_io_page = alloc_page(GFP_KERNEL);
2713 if (!mdev->md_io_page)
2714 goto out_no_io_page;
2715
2716 if (drbd_bm_init(mdev))
2717 goto out_no_bitmap;
Andreas Gruenbacherdac13892011-01-21 17:18:39 +01002718 mdev->read_requests = RB_ROOT;
Andreas Gruenbacherde696712011-01-20 15:00:24 +01002719 mdev->write_requests = RB_ROOT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002720
Philipp Reisnerb411b362009-09-25 16:07:19 -07002721 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2722 if (!mdev->current_epoch)
2723 goto out_no_epoch;
2724
2725 INIT_LIST_HEAD(&mdev->current_epoch->list);
2726 mdev->epochs = 1;
2727
Lars Ellenberg8432b312011-03-08 16:11:16 +01002728 if (!idr_pre_get(&minors, GFP_KERNEL))
2729 goto out_no_minor_idr;
2730 if (idr_get_new_above(&minors, mdev, minor, &minor_got))
2731 goto out_no_minor_idr;
2732 if (minor_got != minor) {
2733 err = ERR_MINOR_EXISTS;
2734 drbd_msg_put_info("requested minor exists already");
2735 goto out_idr_remove_minor;
Lars Ellenberg569083c2011-03-07 09:49:02 +01002736 }
2737
Lars Ellenberg8432b312011-03-08 16:11:16 +01002738 if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
Lars Ellenberg569083c2011-03-07 09:49:02 +01002739 goto out_idr_remove_minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002740 if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
2741 goto out_idr_remove_minor;
2742 if (vnr_got != vnr) {
2743 err = ERR_INVALID_REQUEST;
2744 drbd_msg_put_info("requested volume exists already");
2745 goto out_idr_remove_vol;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002746 }
Philipp Reisner774b3052011-02-22 02:07:03 -05002747 add_disk(disk);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002748 kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
Philipp Reisner774b3052011-02-22 02:07:03 -05002749
Philipp Reisner2325eb62011-03-15 16:56:18 +01002750 /* inherit the connection state */
2751 mdev->state.conn = tconn->cstate;
2752 if (mdev->state.conn == C_WF_REPORT_PARAMS)
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002753 drbd_connected(mdev);
Philipp Reisner2325eb62011-03-15 16:56:18 +01002754
Philipp Reisner774b3052011-02-22 02:07:03 -05002755 return NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002756
Lars Ellenberg569083c2011-03-07 09:49:02 +01002757out_idr_remove_vol:
2758 idr_remove(&tconn->volumes, vnr_got);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002759out_idr_remove_minor:
2760 idr_remove(&minors, minor_got);
Lars Ellenberg569083c2011-03-07 09:49:02 +01002761 synchronize_rcu();
Lars Ellenberg8432b312011-03-08 16:11:16 +01002762out_no_minor_idr:
Philipp Reisner81a5d602011-02-22 19:53:16 -05002763 kfree(mdev->current_epoch);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002764out_no_epoch:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002765 drbd_bm_cleanup(mdev);
2766out_no_bitmap:
2767 __free_page(mdev->md_io_page);
2768out_no_io_page:
2769 put_disk(disk);
2770out_no_disk:
2771 blk_cleanup_queue(q);
2772out_no_q:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002773 kfree(mdev);
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002774 kref_put(&tconn->kref, &conn_destroy);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002775 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002776}
2777
Philipp Reisnerb411b362009-09-25 16:07:19 -07002778int __init drbd_init(void)
2779{
2780 int err;
2781
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01002782 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002783 printk(KERN_ERR
Philipp Reisner81a5d602011-02-22 19:53:16 -05002784 "drbd: invalid minor_count (%d)\n", minor_count);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002785#ifdef MODULE
2786 return -EINVAL;
2787#else
Andreas Gruenbacher46530e82011-05-31 13:08:53 +02002788 minor_count = DRBD_MINOR_COUNT_DEF;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002789#endif
2790 }
2791
Philipp Reisnerb411b362009-09-25 16:07:19 -07002792 err = register_blkdev(DRBD_MAJOR, "drbd");
2793 if (err) {
2794 printk(KERN_ERR
2795 "drbd: unable to register block device major %d\n",
2796 DRBD_MAJOR);
2797 return err;
2798 }
2799
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002800 err = drbd_genl_register();
2801 if (err) {
2802 printk(KERN_ERR "drbd: unable to register generic netlink family\n");
2803 goto fail;
2804 }
2805
2806
Philipp Reisnerb411b362009-09-25 16:07:19 -07002807 register_reboot_notifier(&drbd_notifier);
2808
2809 /*
2810 * allocate all necessary structs
2811 */
2812 err = -ENOMEM;
2813
2814 init_waitqueue_head(&drbd_pp_wait);
2815
2816 drbd_proc = NULL; /* play safe for drbd_cleanup */
Philipp Reisner81a5d602011-02-22 19:53:16 -05002817 idr_init(&minors);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002818
2819 err = drbd_create_mempools();
2820 if (err)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002821 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002822
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01002823 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002824 if (!drbd_proc) {
2825 printk(KERN_ERR "drbd: unable to register proc file\n");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002826 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002827 }
2828
2829 rwlock_init(&global_state_lock);
Philipp Reisner21114382011-01-19 12:26:59 +01002830 INIT_LIST_HEAD(&drbd_tconns);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002831
2832 printk(KERN_INFO "drbd: initialized. "
2833 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2834 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2835 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
2836 printk(KERN_INFO "drbd: registered as block device major %d\n",
2837 DRBD_MAJOR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002838
2839 return 0; /* Success! */
2840
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002841fail:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002842 drbd_cleanup();
2843 if (err == -ENOMEM)
2844 /* currently always the case */
2845 printk(KERN_ERR "drbd: ran out of memory\n");
2846 else
2847 printk(KERN_ERR "drbd: initialization failure\n");
2848 return err;
2849}
2850
2851void drbd_free_bc(struct drbd_backing_dev *ldev)
2852{
2853 if (ldev == NULL)
2854 return;
2855
Tejun Heoe525fd82010-11-13 11:55:17 +01002856 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2857 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002858
2859 kfree(ldev);
2860}
2861
Philipp Reisner360cc742011-02-08 14:29:53 +01002862void drbd_free_sock(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002863{
Philipp Reisner360cc742011-02-08 14:29:53 +01002864 if (tconn->data.socket) {
2865 mutex_lock(&tconn->data.mutex);
2866 kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
2867 sock_release(tconn->data.socket);
2868 tconn->data.socket = NULL;
2869 mutex_unlock(&tconn->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002870 }
Philipp Reisner360cc742011-02-08 14:29:53 +01002871 if (tconn->meta.socket) {
2872 mutex_lock(&tconn->meta.mutex);
2873 kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
2874 sock_release(tconn->meta.socket);
2875 tconn->meta.socket = NULL;
2876 mutex_unlock(&tconn->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002877 }
2878}
2879
Philipp Reisnerb411b362009-09-25 16:07:19 -07002880/* meta data management */
2881
2882struct meta_data_on_disk {
2883 u64 la_size; /* last agreed size. */
2884 u64 uuid[UI_SIZE]; /* UUIDs. */
2885 u64 device_uuid;
2886 u64 reserved_u64_1;
2887 u32 flags; /* MDF */
2888 u32 magic;
2889 u32 md_size_sect;
2890 u32 al_offset; /* offset to this block */
2891 u32 al_nr_extents; /* important for restoring the AL */
Lars Ellenbergf3990022011-03-23 14:31:09 +01002892 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002893 u32 bm_offset; /* offset to the bitmap, from here */
2894 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
Philipp Reisner99432fc2011-05-20 16:39:13 +02002895 u32 la_peer_max_bio_size; /* last peer max_bio_size */
2896 u32 reserved_u32[3];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002897
2898} __packed;
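
/* The super block is written as one full 512-byte sector; drbd_md_sync()
 * below zeroes the buffer before filling in the fields in big-endian.
 * A hypothetical build-time check for that assumption (not present in
 * the driver) could look like this: */
static inline void __maybe_unused meta_data_on_disk_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) > 512);
}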
2899
2900/**
2901 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
2902 * @mdev: DRBD device.
2903 */
2904void drbd_md_sync(struct drbd_conf *mdev)
2905{
2906 struct meta_data_on_disk *buffer;
2907 sector_t sector;
2908 int i;
2909
Lars Ellenbergee15b032010-09-03 10:00:09 +02002910 del_timer(&mdev->md_sync_timer);
2911 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002912 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
2913 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002914
2915 /* We use here D_FAILED and not D_ATTACHING because we try to write
2916 * metadata even if we detach due to a disk failure! */
2917 if (!get_ldev_if_state(mdev, D_FAILED))
2918 return;
2919
Philipp Reisnercdfda632011-07-05 15:38:59 +02002920 buffer = drbd_md_get_buffer(mdev);
2921 if (!buffer)
2922 goto out;
2923
Philipp Reisnerb411b362009-09-25 16:07:19 -07002924 memset(buffer, 0, 512);
2925
2926 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
2927 for (i = UI_CURRENT; i < UI_SIZE; i++)
2928 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
2929 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02002930 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002931
2932 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
2933 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
2934 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
2935 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
2936 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
2937
2938 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002939 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002940
2941 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
2942 sector = mdev->ldev->md.md_offset;
2943
Andreas Gruenbacher3fbf4d22010-12-13 02:25:41 +01002944 if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002945 /* this was a try anyways ... */
2946 dev_err(DEV, "meta data update failed!\n");
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002947 drbd_chk_io_error(mdev, 1, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002948 }
2949
2950 /* Update mdev->ldev->md.la_size_sect,
2951 * since we just updated it in the on-disk metadata. */
2952 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
2953
Philipp Reisnercdfda632011-07-05 15:38:59 +02002954 drbd_md_put_buffer(mdev);
2955out:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002956 put_ldev(mdev);
2957}
2958
2959/**
2960 * drbd_md_read() - Reads in the meta data super block
2961 * @mdev: DRBD device.
2962 * @bdev: Device from which the meta data should be read in.
2963 *
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01002964 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02002965 * something goes wrong.
Philipp Reisnerb411b362009-09-25 16:07:19 -07002966 */
2967int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
2968{
2969 struct meta_data_on_disk *buffer;
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02002970 u32 magic, flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002971 int i, rv = NO_ERROR;
2972
2973 if (!get_ldev_if_state(mdev, D_ATTACHING))
2974 return ERR_IO_MD_DISK;
2975
Philipp Reisnercdfda632011-07-05 15:38:59 +02002976 buffer = drbd_md_get_buffer(mdev);
2977 if (!buffer)
2978 goto out;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002979
Andreas Gruenbacher3fbf4d22010-12-13 02:25:41 +01002980 if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002981 /* NOTE: can't do normal error processing here as this is
Philipp Reisnerb411b362009-09-25 16:07:19 -07002982 called BEFORE disk is attached */
2983 dev_err(DEV, "Error while reading metadata.\n");
2984 rv = ERR_IO_MD_DISK;
2985 goto err;
2986 }
2987
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02002988 magic = be32_to_cpu(buffer->magic);
2989 flags = be32_to_cpu(buffer->flags);
2990 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
2991 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
2992 /* btw: that's Activity Log clean, not "all" clean. */
2993 dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
2994 rv = ERR_MD_UNCLEAN;
2995 goto err;
2996 }
2997 if (magic != DRBD_MD_MAGIC_08) {
2998 if (magic == DRBD_MD_MAGIC_07)
2999 dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3000 else
3001 dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003002 rv = ERR_MD_INVALID;
3003 goto err;
3004 }
3005 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3006 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3007 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3008 rv = ERR_MD_INVALID;
3009 goto err;
3010 }
3011 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3012 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3013 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3014 rv = ERR_MD_INVALID;
3015 goto err;
3016 }
3017 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3018 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3019 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3020 rv = ERR_MD_INVALID;
3021 goto err;
3022 }
3023
3024 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3025 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3026 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3027 rv = ERR_MD_INVALID;
3028 goto err;
3029 }
3030
3031 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3032 for (i = UI_CURRENT; i < UI_SIZE; i++)
3033 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3034 bdev->md.flags = be32_to_cpu(buffer->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003035 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3036
Philipp Reisner87eeee42011-01-19 14:16:30 +01003037 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003038 if (mdev->state.conn < C_CONNECTED) {
3039 int peer;
3040 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3041 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3042 mdev->peer_max_bio_size = peer;
3043 }
Philipp Reisner87eeee42011-01-19 14:16:30 +01003044 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner99432fc2011-05-20 16:39:13 +02003045
Philipp Reisnerb411b362009-09-25 16:07:19 -07003046 err:
Philipp Reisnercdfda632011-07-05 15:38:59 +02003047 drbd_md_put_buffer(mdev);
3048 out:
Philipp Reisnerb411b362009-09-25 16:07:19 -07003049 put_ldev(mdev);
3050
3051 return rv;
3052}
3053
3054/**
3055 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3056 * @mdev: DRBD device.
3057 *
3058 * Call this function if you change anything that should be written to
3059 * the meta-data super block. This function sets MD_DIRTY, and starts a
3060 * timer that ensures drbd_md_sync() gets called within five seconds.
3061 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003062#ifdef DEBUG
Lars Ellenbergee15b032010-09-03 10:00:09 +02003063void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3064{
3065 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3066 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3067 mdev->last_md_mark_dirty.line = line;
3068 mdev->last_md_mark_dirty.func = func;
3069 }
3070}
3071#else
Philipp Reisnerb411b362009-09-25 16:07:19 -07003072void drbd_md_mark_dirty(struct drbd_conf *mdev)
3073{
Lars Ellenbergee15b032010-09-03 10:00:09 +02003074 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003075 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003076}
Lars Ellenbergee15b032010-09-03 10:00:09 +02003077#endif
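
/* Illustrative sketch only (hypothetical helper): the typical pairing.
 * Mutating on-disk metadata marks the super block dirty; drbd_md_sync()
 * then writes it out, either right away or at latest when the timer
 * armed above fires. */
static void __maybe_unused example_md_update(struct drbd_conf *mdev)
{
	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return;
	drbd_md_set_flag(mdev, MDF_FULL_SYNC);	/* calls drbd_md_mark_dirty() */
	drbd_md_sync(mdev);	/* clears MD_DIRTY, writes the super block */
	put_ldev(mdev);
}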
Philipp Reisnerb411b362009-09-25 16:07:19 -07003078
3079static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3080{
3081 int i;
3082
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003083 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003084 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003085}
3086
3087void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3088{
3089 if (idx == UI_CURRENT) {
3090 if (mdev->state.role == R_PRIMARY)
3091 val |= 1;
3092 else
3093 val &= ~((u64)1);
3094
3095 drbd_set_ed_uuid(mdev, val);
3096 }
3097
3098 mdev->ldev->md.uuid[idx] = val;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003099 drbd_md_mark_dirty(mdev);
3100}
3101
3102
3103void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3104{
3105 if (mdev->ldev->md.uuid[idx]) {
3106 drbd_uuid_move_history(mdev);
3107 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003108 }
3109 _drbd_uuid_set(mdev, idx, val);
3110}
3111
3112/**
3113 * drbd_uuid_new_current() - Creates a new current UUID
3114 * @mdev: DRBD device.
3115 *
3116 * Creates a new current UUID, and rotates the old current UUID into
3117 * the bitmap slot. Causes an incremental resync upon next connect.
3118 */
3119void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3120{
3121 u64 val;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003122 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003123
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003124 if (bm_uuid)
3125 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3126
Philipp Reisnerb411b362009-09-25 16:07:19 -07003127 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003128
3129 get_random_bytes(&val, sizeof(u64));
3130 _drbd_uuid_set(mdev, UI_CURRENT, val);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003131 drbd_print_uuids(mdev, "new current UUID");
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02003132 /* get it to stable storage _now_ */
3133 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003134}
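
/* Illustrative sketch only (hypothetical helper, caller holds a local
 * reference): the effect of the rotation above on the UUID slots. */
static void __maybe_unused example_uuid_rotation(struct drbd_conf *mdev)
{
	u64 old_current = mdev->ldev->md.uuid[UI_CURRENT];

	drbd_uuid_new_current(mdev);
	/* the old current UUID now sits in the bitmap slot ... */
	D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == old_current);
	/* ... and UI_CURRENT holds a fresh random value. */
}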
3135
3136void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3137{
3138 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3139 return;
3140
3141 if (val == 0) {
3142 drbd_uuid_move_history(mdev);
3143 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3144 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003145 } else {
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003146 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3147 if (bm_uuid)
3148 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003149
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003150 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003151 }
3152 drbd_md_mark_dirty(mdev);
3153}
3154
3155/**
3156 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3157 * @mdev: DRBD device.
3158 *
3159 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3160 */
3161int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3162{
3163 int rv = -EIO;
3164
3165 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3166 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3167 drbd_md_sync(mdev);
3168 drbd_bm_set_all(mdev);
3169
3170 rv = drbd_bm_write(mdev);
3171
3172 if (!rv) {
3173 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3174 drbd_md_sync(mdev);
3175 }
3176
3177 put_ldev(mdev);
3178 }
3179
3180 return rv;
3181}
3182
3183/**
3184 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3185 * @mdev: DRBD device.
3186 *
3187 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3188 */
3189int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3190{
3191 int rv = -EIO;
3192
Philipp Reisner07782862010-08-31 12:00:50 +02003193 drbd_resume_al(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003194 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3195 drbd_bm_clear_all(mdev);
3196 rv = drbd_bm_write(mdev);
3197 put_ldev(mdev);
3198 }
3199
3200 return rv;
3201}
3202
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003203static int w_bitmap_io(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003204{
3205 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01003206 struct drbd_conf *mdev = w->mdev;
Lars Ellenberg02851e92010-12-16 14:47:39 +01003207 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003208
3209 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3210
Lars Ellenberg02851e92010-12-16 14:47:39 +01003211 if (get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003212 drbd_bm_lock(mdev, work->why, work->flags);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003213 rv = work->io_fn(mdev);
3214 drbd_bm_unlock(mdev);
3215 put_ldev(mdev);
3216 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003217
Lars Ellenberg4738fa12011-02-21 13:20:55 +01003218 clear_bit_unlock(BITMAP_IO, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003219 wake_up(&mdev->misc_wait);
3220
3221 if (work->done)
3222 work->done(mdev, rv);
3223
3224 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3225 work->why = NULL;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003226 work->flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003227
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003228 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003229}
3230
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003231void drbd_ldev_destroy(struct drbd_conf *mdev)
3232{
3233 lc_destroy(mdev->resync);
3234 mdev->resync = NULL;
3235 lc_destroy(mdev->act_log);
3236 mdev->act_log = NULL;
3237 __no_warn(local,
3238 drbd_free_bc(mdev->ldev);
3239 mdev->ldev = NULL;);
3240
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003241 clear_bit(GO_DISKLESS, &mdev->flags);
3242}
3243
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003244static int w_go_diskless(struct drbd_work *w, int unused)
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003245{
Philipp Reisner00d56942011-02-09 18:09:48 +01003246 struct drbd_conf *mdev = w->mdev;
3247
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003248 D_ASSERT(mdev->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02003249 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3250 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003251 * the protected members anymore, though, so once put_ldev reaches zero
3252 * again, it will be safe to free them. */
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003253 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003254 return 0;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003255}
3256
3257void drbd_go_diskless(struct drbd_conf *mdev)
3258{
3259 D_ASSERT(mdev->state.disk == D_FAILED);
3260 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
Philipp Reisnere42325a2011-01-19 13:55:45 +01003261 drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003262}
3263
Philipp Reisnerb411b362009-09-25 16:07:19 -07003264/**
3265 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3266 * @mdev: DRBD device.
3267 * @io_fn: IO callback to be called when bitmap IO is possible
3268 * @done: callback to be called after the bitmap IO was performed
3269 * @why: Descriptive text of the reason for doing the IO
3270 *
3271 * While IO on the bitmap happens we freeze application IO, thus ensuring
3272 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3273 * called from worker context. It MUST NOT be used while a previous such
3274 * work is still pending!
3275 */
3276void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3277 int (*io_fn)(struct drbd_conf *),
3278 void (*done)(struct drbd_conf *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003279 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003280{
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003281 D_ASSERT(current == mdev->tconn->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003282
3283 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3284 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3285 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3286 if (mdev->bm_io_work.why)
3287 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3288 why, mdev->bm_io_work.why);
3289
3290 mdev->bm_io_work.io_fn = io_fn;
3291 mdev->bm_io_work.done = done;
3292 mdev->bm_io_work.why = why;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003293 mdev->bm_io_work.flags = flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003294
Philipp Reisner87eeee42011-01-19 14:16:30 +01003295 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003296 set_bit(BITMAP_IO, &mdev->flags);
3297 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
Philipp Reisner127b3172010-11-16 10:07:53 +01003298 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
Philipp Reisnere42325a2011-01-19 13:55:45 +01003299 drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003300 }
Philipp Reisner87eeee42011-01-19 14:16:30 +01003301 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003302}
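
/* Illustrative sketch only: a hypothetical worker-context caller of the
 * API above, using drbd_bmio_set_n_write() as io_fn and a trivial done
 * callback. The flag value is one already used elsewhere in this file. */
static void example_bm_io_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "bitmap IO failed: %d\n", rv);
}

static void __maybe_unused example_queue_full_sync(struct drbd_conf *mdev)
{
	/* must run in worker context; no other bitmap IO may be pending */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
			     &example_bm_io_done,
			     "example full sync", BM_LOCKED_SET_ALLOWED);
}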
3303
3304/**
3305 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3306 * @mdev: DRBD device.
3307 * @io_fn: IO callback to be called when bitmap IO is possible
3308 * @why: Descriptive text of the reason for doing the IO
3309 *
3310 * freezes application IO while the actual IO operation runs. This
3311 * function MAY NOT be called from worker context.
3312 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003313int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
3314 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003315{
3316 int rv;
3317
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003318 D_ASSERT(current != mdev->tconn->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003319
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003320 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3321 drbd_suspend_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003322
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003323 drbd_bm_lock(mdev, why, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003324 rv = io_fn(mdev);
3325 drbd_bm_unlock(mdev);
3326
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003327 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3328 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003329
3330 return rv;
3331}
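
/* Illustrative sketch only: the synchronous counterpart, e.g. clearing
 * the whole bitmap from a non-worker context (hypothetical caller). */
static int __maybe_unused example_clear_bitmap(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			      "example clear", BM_LOCKED_SET_ALLOWED);
}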
3332
3333void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3334{
3335 if ((mdev->ldev->md.flags & flag) != flag) {
3336 drbd_md_mark_dirty(mdev);
3337 mdev->ldev->md.flags |= flag;
3338 }
3339}
3340
3341void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3342{
3343 if ((mdev->ldev->md.flags & flag) != 0) {
3344 drbd_md_mark_dirty(mdev);
3345 mdev->ldev->md.flags &= ~flag;
3346 }
3347}
3348int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3349{
3350 return (bdev->md.flags & flag) != 0;
3351}
3352
3353static void md_sync_timer_fn(unsigned long data)
3354{
3355 struct drbd_conf *mdev = (struct drbd_conf *) data;
3356
Philipp Reisnere42325a2011-01-19 13:55:45 +01003357 drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003358}
3359
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003360static int w_md_sync(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003361{
Philipp Reisner00d56942011-02-09 18:09:48 +01003362 struct drbd_conf *mdev = w->mdev;
3363
Philipp Reisnerb411b362009-09-25 16:07:19 -07003364 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02003365#ifdef DEBUG
3366 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3367 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3368#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07003369 drbd_md_sync(mdev);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003370 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003371}
3372
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003373const char *cmdname(enum drbd_packet cmd)
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003374{
3375 /* THINK may need to become several global tables
3376 * when we want to support more than
3377 * one PRO_VERSION */
3378 static const char *cmdnames[] = {
3379 [P_DATA] = "Data",
3380 [P_DATA_REPLY] = "DataReply",
3381 [P_RS_DATA_REPLY] = "RSDataReply",
3382 [P_BARRIER] = "Barrier",
3383 [P_BITMAP] = "ReportBitMap",
3384 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3385 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3386 [P_UNPLUG_REMOTE] = "UnplugRemote",
3387 [P_DATA_REQUEST] = "DataRequest",
3388 [P_RS_DATA_REQUEST] = "RSDataRequest",
3389 [P_SYNC_PARAM] = "SyncParam",
3390 [P_SYNC_PARAM89] = "SyncParam89",
3391 [P_PROTOCOL] = "ReportProtocol",
3392 [P_UUIDS] = "ReportUUIDs",
3393 [P_SIZES] = "ReportSizes",
3394 [P_STATE] = "ReportState",
3395 [P_SYNC_UUID] = "ReportSyncUUID",
3396 [P_AUTH_CHALLENGE] = "AuthChallenge",
3397 [P_AUTH_RESPONSE] = "AuthResponse",
3398 [P_PING] = "Ping",
3399 [P_PING_ACK] = "PingAck",
3400 [P_RECV_ACK] = "RecvAck",
3401 [P_WRITE_ACK] = "WriteAck",
3402 [P_RS_WRITE_ACK] = "RSWriteAck",
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003403 [P_DISCARD_WRITE] = "DiscardWrite",
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003404 [P_NEG_ACK] = "NegAck",
3405 [P_NEG_DREPLY] = "NegDReply",
3406 [P_NEG_RS_DREPLY] = "NegRSDReply",
3407 [P_BARRIER_ACK] = "BarrierAck",
3408 [P_STATE_CHG_REQ] = "StateChgRequest",
3409 [P_STATE_CHG_REPLY] = "StateChgReply",
3410 [P_OV_REQUEST] = "OVRequest",
3411 [P_OV_REPLY] = "OVReply",
3412 [P_OV_RESULT] = "OVResult",
3413 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3414 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3415 [P_COMPRESSED_BITMAP] = "CBitmap",
3416 [P_DELAY_PROBE] = "DelayProbe",
3417 [P_OUT_OF_SYNC] = "OutOfSync",
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003418 [P_RETRY_WRITE] = "RetryWrite",
Lars Ellenbergae25b332011-04-24 00:01:16 +02003419 [P_RS_CANCEL] = "RSCancel",
3420 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3421 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
Philipp Reisner036b17e2011-05-16 17:38:11 +02003422 [P_PROTOCOL_UPDATE] = "protocol_update",
Lars Ellenbergae25b332011-04-24 00:01:16 +02003424
3425 /* enum drbd_packet, but not commands - obsoleted flags:
3426 * P_MAY_IGNORE
3427 * P_MAX_OPT_CMD
3428 */
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003429 };
3430
Lars Ellenbergae25b332011-04-24 00:01:16 +02003431 /* too big for the array: 0xfffX */
Andreas Gruenbachere5d6f332011-03-28 16:44:40 +02003432 if (cmd == P_INITIAL_META)
3433 return "InitialMeta";
3434 if (cmd == P_INITIAL_DATA)
3435 return "InitialData";
Andreas Gruenbacher60381782011-03-28 17:05:50 +02003436 if (cmd == P_CONNECTION_FEATURES)
3437 return "ConnectionFeatures";
Andreas Gruenbacher6e849ce2011-03-14 17:27:45 +01003438 if (cmd >= ARRAY_SIZE(cmdnames))
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003439 return "Unknown";
3440 return cmdnames[cmd];
3441}
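
/* Illustrative sketch only (hypothetical helper): typical use of
 * cmdname() when tracing protocol traffic. */
static void __maybe_unused example_trace_packet(struct drbd_conf *mdev,
						enum drbd_packet cmd)
{
	dev_info(DEV, "got packet %s (0x%04x)\n", cmdname(cmd), cmd);
}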
3442
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003443/**
3444 * drbd_wait_misc - wait for a request to make progress
3445 * @mdev: device associated with the request
3446 * @i: the struct drbd_interval embedded in struct drbd_request or
3447 * struct drbd_peer_request
3448 */
3449int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
3450{
Philipp Reisner44ed1672011-04-19 17:10:19 +02003451 struct net_conf *nc;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003452 DEFINE_WAIT(wait);
3453 long timeout;
3454
Philipp Reisner44ed1672011-04-19 17:10:19 +02003455 rcu_read_lock();
3456 nc = rcu_dereference(mdev->tconn->net_conf);
3457 if (!nc) {
3458 rcu_read_unlock();
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003459 return -ETIMEDOUT;
Philipp Reisner44ed1672011-04-19 17:10:19 +02003460 }
3461 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3462 rcu_read_unlock();
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003463
3464 /* Indicate to wake up mdev->misc_wait on progress. */
3465 i->waiting = true;
3466 prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
3467 spin_unlock_irq(&mdev->tconn->req_lock);
3468 timeout = schedule_timeout(timeout);
3469 finish_wait(&mdev->misc_wait, &wait);
3470 spin_lock_irq(&mdev->tconn->req_lock);
3471 if (!timeout || mdev->state.conn < C_CONNECTED)
3472 return -ETIMEDOUT;
3473 if (signal_pending(current))
3474 return -ERESTARTSYS;
3475 return 0;
3476}
3477
Philipp Reisnerb411b362009-09-25 16:07:19 -07003478#ifdef CONFIG_DRBD_FAULT_INJECTION
3479/* Fault insertion support including random number generator shamelessly
3480 * stolen from kernel/rcutorture.c */
3481struct fault_random_state {
3482 unsigned long state;
3483 unsigned long count;
3484};
3485
3486#define FAULT_RANDOM_MULT 39916801 /* prime */
3487#define FAULT_RANDOM_ADD 479001701 /* prime */
3488#define FAULT_RANDOM_REFRESH 10000
3489
3490/*
3491 * Crude but fast random-number generator. Uses a linear congruential
3492 * generator, with occasional help from get_random_bytes().
3493 */
3494static unsigned long
3495_drbd_fault_random(struct fault_random_state *rsp)
3496{
3497 long refresh;
3498
Roel Kluin49829ea2009-12-15 22:55:44 +01003499 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003500 get_random_bytes(&refresh, sizeof(refresh));
3501 rsp->state += refresh;
3502 rsp->count = FAULT_RANDOM_REFRESH;
3503 }
3504 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3505 return swahw32(rsp->state);
3506}
3507
3508static char *
3509_drbd_fault_str(unsigned int type) {
3510 static char *_faults[] = {
3511 [DRBD_FAULT_MD_WR] = "Meta-data write",
3512 [DRBD_FAULT_MD_RD] = "Meta-data read",
3513 [DRBD_FAULT_RS_WR] = "Resync write",
3514 [DRBD_FAULT_RS_RD] = "Resync read",
3515 [DRBD_FAULT_DT_WR] = "Data write",
3516 [DRBD_FAULT_DT_RD] = "Data read",
3517 [DRBD_FAULT_DT_RA] = "Data read ahead",
3518 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02003519 [DRBD_FAULT_AL_EE] = "EE allocation",
3520 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003521 };
3522
3523 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3524}
3525
3526unsigned int
3527_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3528{
3529 static struct fault_random_state rrs = {0, 0};
3530
3531 unsigned int ret = (
3532 (fault_devs == 0 ||
3533 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3534 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3535
3536 if (ret) {
3537 fault_count++;
3538
Lars Ellenberg73835062010-05-27 11:51:56 +02003539 if (__ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003540 dev_warn(DEV, "***Simulating %s failure\n",
3541 _drbd_fault_str(type));
3542 }
3543
3544 return ret;
3545}
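
/* Illustrative sketch only: how an IO path consults the fault injector
 * above. The driver wraps this in a drbd_insert_fault() helper in
 * drbd_int.h (assumption based on usage elsewhere, not shown here). */
static bool __maybe_unused example_should_fail_md_write(struct drbd_conf *mdev)
{
	return _drbd_insert_fault(mdev, DRBD_FAULT_MD_WR) != 0;
}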
3546#endif
3547
3548const char *drbd_buildtag(void)
3549{
3550 /* DRBD built from external sources has here a reference to the
3551 git hash of the source code. */
3552
3553 static char buildtag[38] = "\0uilt-in";
3554
3555 if (buildtag[0] == 0) {
3556#ifdef CONFIG_MODULES
3557 if (THIS_MODULE != NULL)
3558 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3559 else
3560#endif
3561 buildtag[0] = 'b';
3562 }
3563
3564 return buildtag;
3565}
3566
3567module_init(drbd_init)
3568module_exit(drbd_cleanup)
3569
Philipp Reisnerb411b362009-09-25 16:07:19 -07003570EXPORT_SYMBOL(drbd_conn_str);
3571EXPORT_SYMBOL(drbd_role_str);
3572EXPORT_SYMBOL(drbd_disk_str);
3573EXPORT_SYMBOL(drbd_set_st_err_str);