/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details; /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;  /* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

133/* I do not use a standard mempool, because:
134 1) I want to hand out the pre-allocated objects first.
135 2) I want to be able to interrupt sleeping allocation with a signal.
136 Note: This is a single linked list, the next pointer is the private
137 member of struct page.
138 */
139struct page *drbd_pp_pool;
140spinlock_t drbd_pp_lock;
141int drbd_pp_vacant;
142wait_queue_head_t drbd_pp_wait;
143
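/*
 * A minimal sketch (not the driver's actual allocation helpers, which
 * live elsewhere; example_pp_* are hypothetical names) of how such a
 * LIFO page list works, using the private member of struct page as
 * the "next" pointer.  Callers would hold drbd_pp_lock:
 *
 *	static void example_pp_push(struct page *page)
 *	{
 *		set_page_private(page, (unsigned long)drbd_pp_pool);
 *		drbd_pp_pool = page;
 *		drbd_pp_vacant++;
 *	}
 *
 *	static struct page *example_pp_pop(void)
 *	{
 *		struct page *page = drbd_pp_pool;
 *		if (page) {
 *			drbd_pp_pool = (struct page *)page_private(page);
 *			drbd_pp_vacant--;
 *		}
 *		return page;
 *	}
 */
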
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif
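
/*
 * Usage sketch (hypothetical caller) for the local-disk reference
 * counting above: get_ldev_if_state()/put_ldev() from drbd_int.h wrap
 * this pattern, pairing the local_cnt increment with a decrement:
 *
 *	if (get_ldev_if_state(mdev, D_INCONSISTENT)) {
 *		... mdev->ldev may be dereferenced here ...
 *		put_ldev(mdev);
 *	}
 */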

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
 * attached.
 */
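
/*
 * Illustrative sketch only (example_tl_walk is a hypothetical helper):
 * the epoch list is walked from oldest to newest via the ->next pointer:
 *
 *	static void example_tl_walk(struct drbd_tconn *tconn)
 *	{
 *		struct drbd_tl_epoch *b;
 *
 *		for (b = tconn->oldest_tle; b; b = b->next)
 *			conn_info(tconn, "epoch #%u, %d writes\n",
 *				  b->br_number, b->n_writes);
 *	}
 *
 * The list is never empty; tl_init() below seeds it with one epoch.
 */
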
static int tl_init(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);
	INIT_LIST_HEAD(&tconn->barrier_acked_requests);

	return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn: DRBD connection.
 * @new: Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = tconn->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
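	/* e.g. 0xffffffff + 1 wraps to 0, which the "?: 1" remaps to 1 */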
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn: DRBD connection.
 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
 * @set_size: Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);

	b = tconn->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	   */
	list_splice_init(&b->requests, &tconn->barrier_acked_requests);
	mdev = b->w.mdev;

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(tconn, b);
		if (nob)
			tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn: DRBD connection.
 * @what: The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			if (rv & MR_WRITE)
				n_writes++;
			if (rv & MR_READ)
				n_reads++;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == RESEND) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(b->w.mdev);
					set_bit(CREATE_BARRIER, &b->w.mdev->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
				if (tmp != NULL)
					conn_err(tconn, "ASSERT FAILED tmp == NULL");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}

	/* Actions operating on the disk state also want to work on
	   requests that got barrier acked. */
	switch (what) {
	case FAIL_FROZEN_DISK_IO:
	case RESTART_FROZEN_DISK_IO:
		list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			_req_mod(req, what);
		}
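		/* fall through */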
	case CONNECTION_LOST_WHILE_PENDING:
	case RESEND:
		break;
	default:
		conn_err(tconn, "what = %d in _tl_restart()\n", what);
	}
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn: DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	struct list_head *le, *tle;
	struct drbd_request *r;
	int vnr;

	spin_lock_irq(&tconn->req_lock);

	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);
	}

	/* ensure bit indicating barrier is required is clear */
	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		clear_bit(CREATE_BARRIER, &mdev->flags);
	rcu_read_unlock();

	spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev: DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_tl_epoch *b;
	struct list_head *le, *tle;
	struct drbd_request *req;

	spin_lock_irq(&tconn->req_lock);
	b = tconn->oldest_tle;
	while (b) {
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			if (!(req->rq_state & RQ_LOCAL_PENDING))
				continue;
			if (req->w.mdev == mdev)
				_req_mod(req, ABORT_DISK_IO);
		}
		b = b->next;
	}

	list_for_each_safe(le, tle, &tconn->barrier_acked_requests) {
		req = list_entry(le, struct drbd_request, tl_requests);
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->w.mdev == mdev)
			_req_mod(req, ABORT_DISK_IO);
	}

	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
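
/*
 * Lifecycle sketch for the helpers above (hypothetical caller; the
 * thread objects and drbd_worker() are real, the sequence is only an
 * illustration):
 *
 *	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
 *	if (!drbd_thread_start(&tconn->worker))
 *		... handle error ...
 *	...
 *	_drbd_thread_stop(&tconn->worker, false, true);   (no restart, wait)
 */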

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn: DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi: drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
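
/*
 * Summary of the dispatch above: peers with agreed_pro_version >= 100
 * always get the p_header100 layout (magic, volume, command, length,
 * pad); peers in the 95..99 range get the compact p_header80 unless
 * the packet is larger than DRBD_MAX_SIZE_H80_PACKET, in which case
 * the 32-bit-length p_header95 is used; older peers always get
 * p_header80.  The BUILD_BUG_ONs in drbd_header_size() keep every
 * variant a multiple of 8 bytes.
 */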

static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
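
/*
 * Note the locking protocol of the helpers above: *_prepare_command()
 * takes sock->mutex (and returns NULL, already unlocked, on failure),
 * and the matching *_send_command() releases it.  A typical sender
 * therefore looks like this sketch (drbd_send_ping() below is a real
 * instance of the pattern):
 *
 *	p = conn_prepare_command(tconn, sock);
 *	if (!p)
 *		return -EIO;
 *	... fill in *p ...
 *	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
 */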

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);

	return err;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
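	/* uuid_flags bits, as assembled below: 1 = peer should discard
	 * its data (net_conf discard_my_data), 2 = we were primary when
	 * we crashed, 4 = our disk was D_INCONSISTENT; 8 is set only by
	 * drbd_send_uuids_skip_initial_sync() further down (skip the
	 * initial sync, per that caller's name). */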
Philipp Reisner44ed1672011-04-19 17:10:19 +02001043 rcu_read_lock();
Andreas Gruenbacher6139f602011-05-06 20:00:02 +02001044 uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001045 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001046 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1047 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001048 p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001049
1050 put_ldev(mdev);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001051 return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001052}
1053
1054int drbd_send_uuids(struct drbd_conf *mdev)
1055{
1056 return _drbd_send_uuids(mdev, 0);
1057}
1058
1059int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
1060{
1061 return _drbd_send_uuids(mdev, 8);
1062}
1063
Lars Ellenberg62b0da32011-01-20 13:25:21 +01001064void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
1065{
1066 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
1067 u64 *uuid = mdev->ldev->md.uuid;
1068 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
1069 text,
1070 (unsigned long long)uuid[UI_CURRENT],
1071 (unsigned long long)uuid[UI_BITMAP],
1072 (unsigned long long)uuid[UI_HISTORY_START],
1073 (unsigned long long)uuid[UI_HISTORY_END]);
1074 put_ldev(mdev);
1075 } else {
1076 dev_info(DEV, "%s effective data uuid: %016llX\n",
1077 text,
1078 (unsigned long long)mdev->ed_uuid);
1079 }
1080}
1081
Andreas Gruenbacher9c1b7f72011-03-16 01:09:01 +01001082void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001083{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001084 struct drbd_socket *sock;
1085 struct p_rs_uuid *p;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01001086 u64 uuid;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001087
Lars Ellenberg5a22db82010-12-17 21:14:23 +01001088 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
1089
Philipp Reisner4a23f262011-01-11 17:42:17 +01001090 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
Lars Ellenberg5a22db82010-12-17 21:14:23 +01001091 drbd_uuid_set(mdev, UI_BITMAP, uuid);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01001092 drbd_print_uuids(mdev, "updated sync UUID");
Lars Ellenberg5a22db82010-12-17 21:14:23 +01001093 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001094
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001095 sock = &mdev->tconn->data;
1096 p = drbd_prepare_command(mdev, sock);
1097 if (p) {
1098 p->uuid = cpu_to_be64(uuid);
1099 drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
1100 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001101}
1102
Philipp Reisnere89b5912010-03-24 17:11:33 +01001103int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001104{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001105 struct drbd_socket *sock;
1106 struct p_sizes *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001107 sector_t d_size, u_size;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001108 int q_order_type, max_bio_size;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001109
1110 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
1111 D_ASSERT(mdev->ldev->backing_bdev);
1112 d_size = drbd_get_max_capacity(mdev->ldev);
Philipp Reisnerdaeda1c2011-05-03 15:00:55 +02001113 rcu_read_lock();
1114 u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
1115 rcu_read_unlock();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001116 q_order_type = drbd_queue_order_type(mdev);
Philipp Reisner99432fc2011-05-20 16:39:13 +02001117 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
1118 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001119 put_ldev(mdev);
1120 } else {
1121 d_size = 0;
1122 u_size = 0;
1123 q_order_type = QUEUE_ORDERED_NONE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001124 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001125 }
1126
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001127 sock = &mdev->tconn->data;
1128 p = drbd_prepare_command(mdev, sock);
1129 if (!p)
1130 return -EIO;
Philipp Reisner2ffca4f2011-06-30 15:43:06 +02001131
1132 if (mdev->tconn->agreed_pro_version <= 94)
1133 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1134 else if (mdev->tconn->agreed_pro_version < 100)
1135 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE_P95);
1136
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001137 p->d_size = cpu_to_be64(d_size);
1138 p->u_size = cpu_to_be64(u_size);
1139 p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
1140 p->max_bio_size = cpu_to_be32(max_bio_size);
1141 p->queue_order_type = cpu_to_be16(q_order_type);
1142 p->dds_flags = cpu_to_be16(flags);
1143 return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001144}
1145
1146/**
1147 * drbd_send_state() - Sends the drbd state to the peer
1148 * @mdev: DRBD device.
1149 */
1150int drbd_send_state(struct drbd_conf *mdev)
1151{
Andreas Gruenbacher7c967152011-03-22 00:49:36 +01001152 struct drbd_socket *sock;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001153 struct p_state *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001154
Andreas Gruenbacher7c967152011-03-22 00:49:36 +01001155 sock = &mdev->tconn->data;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001156 p = drbd_prepare_command(mdev, sock);
1157 if (!p)
1158 return -EIO;
1159 p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
1160 return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001161}
1162
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001163int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001164{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001165 struct drbd_socket *sock;
1166 struct p_req_state *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001167
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001168 sock = &mdev->tconn->data;
1169 p = drbd_prepare_command(mdev, sock);
1170 if (!p)
1171 return -EIO;
1172 p->mask = cpu_to_be32(mask.i);
1173 p->val = cpu_to_be32(val.i);
1174 return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001176}
1177
1178int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
1179{
1180 enum drbd_packet cmd;
1181 struct drbd_socket *sock;
1182 struct p_req_state *p;
1183
1184 cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1185 sock = &tconn->data;
1186 p = conn_prepare_command(tconn, sock);
1187 if (!p)
1188 return -EIO;
1189 p->mask = cpu_to_be32(mask.i);
1190 p->val = cpu_to_be32(val.i);
1191 return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001192}
1193
Andreas Gruenbacher2f4e7ab2011-03-16 01:20:38 +01001194void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001195{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001196 struct drbd_socket *sock;
1197 struct p_req_state_reply *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001198
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001199 sock = &mdev->tconn->meta;
1200 p = drbd_prepare_command(mdev, sock);
1201 if (p) {
1202 p->retcode = cpu_to_be32(retcode);
1203 drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1204 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001205}
1206
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001207void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
Philipp Reisner047cd4a2011-02-15 11:09:33 +01001208{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001209 struct drbd_socket *sock;
1210 struct p_req_state_reply *p;
Philipp Reisner047cd4a2011-02-15 11:09:33 +01001211 enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1212
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001213 sock = &tconn->meta;
1214 p = conn_prepare_command(tconn, sock);
1215 if (p) {
1216 p->retcode = cpu_to_be32(retcode);
1217 conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
1218 }
Philipp Reisner047cd4a2011-02-15 11:09:33 +01001219}
1220
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001221static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1222{
1223 BUG_ON(code & ~0xf);
1224 p->encoding = (p->encoding & ~0xf) | code;
1225}
1226
1227static void dcbp_set_start(struct p_compressed_bm *p, int set)
1228{
1229 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1230}
1231
1232static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1233{
1234 BUG_ON(n & ~0x7);
1235 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1236}
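/* Taken together, the dcbp_* helpers above pack the p_compressed_bm
 * "encoding" byte; derived from the masks used, the layout is roughly:
 *
 *   bit  7    : whether the first (implicit) run is a run of set bits
 *   bits 6..4 : number of pad bits at the end of the bit stream
 *   bits 3..0 : encoding code, e.g. RLE_VLI_Bits
 */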
1237
Philipp Reisnerb411b362009-09-25 16:07:19 -07001238int fill_bitmap_rle_bits(struct drbd_conf *mdev,
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001239 struct p_compressed_bm *p,
1240 unsigned int size,
1241 struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001242{
1243 struct bitstream bs;
1244 unsigned long plain_bits;
1245 unsigned long tmp;
1246 unsigned long rl;
1247 unsigned len;
1248 unsigned toggle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001249 int bits, use_rle;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001250
1251 /* may we use this feature? */
Philipp Reisner44ed1672011-04-19 17:10:19 +02001252 rcu_read_lock();
1253 use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
1254 rcu_read_unlock();
1255 if (!use_rle || mdev->tconn->agreed_pro_version < 90)
1256 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001257
1258 if (c->bit_offset >= c->bm_bits)
1259 return 0; /* nothing to do. */
1260
1261 /* use at most this many bytes */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001262 bitstream_init(&bs, p->code, size, 0);
1263 memset(p->code, 0, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001264 /* plain bits covered in this code string */
1265 plain_bits = 0;
1266
1267 /* p->encoding & 0x80 stores whether the first run is a run of set bits.
1268 * bit offset is implicit.
1269 * start with toggle == 2 so we can recognize the first iteration */
1270 toggle = 2;
1271
1272 /* see how many plain bits we can stuff into one packet
1273 * using RLE and VLI. */
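	/* Illustration (hypothetical bitmap contents): starting at
	 * bit_offset 0, the bit sequence 0000 11111 00... is sent as the
	 * run lengths 4, 5, 2, ..., each VLI-encoded into the bit stream;
	 * only the first run's value (here: 0) is transmitted explicitly,
	 * subsequent runs alternate implicitly. */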
1274 do {
1275 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
1276 : _drbd_bm_find_next(mdev, c->bit_offset);
1277 if (tmp == -1UL)
1278 tmp = c->bm_bits;
1279 rl = tmp - c->bit_offset;
1280
1281 if (toggle == 2) { /* first iteration */
1282 if (rl == 0) {
1283 /* the first checked bit was set,
1284 * store start value, */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001285 dcbp_set_start(p, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001286 /* but skip encoding of zero run length */
1287 toggle = !toggle;
1288 continue;
1289 }
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001290 dcbp_set_start(p, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001291 }
1292
1293 /* paranoia: catch zero runlength.
1294 * can only happen if bitmap is modified while we scan it. */
1295 if (rl == 0) {
1296 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1297 "t:%u bo:%lu\n", toggle, c->bit_offset);
1298 return -1;
1299 }
1300
1301 bits = vli_encode_bits(&bs, rl);
1302 if (bits == -ENOBUFS) /* buffer full */
1303 break;
1304 if (bits <= 0) {
1305 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1306 return 0;
1307 }
1308
1309 toggle = !toggle;
1310 plain_bits += rl;
1311 c->bit_offset = tmp;
1312 } while (c->bit_offset < c->bm_bits);
1313
1314 len = bs.cur.b - p->code + !!bs.cur.bit;
1315
1316 if (plain_bits < (len << 3)) {
1317 /* incompressible with this method.
1318 * we need to rewind both word and bit position. */
1319 c->bit_offset -= plain_bits;
1320 bm_xfer_ctx_bit_to_word_offset(c);
1321 c->bit_offset = c->word_offset * BITS_PER_LONG;
1322 return 0;
1323 }
1324
1325 /* RLE + VLI was able to compress it just fine.
1326 * update c->word_offset. */
1327 bm_xfer_ctx_bit_to_word_offset(c);
1328
1329 /* store pad_bits */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001330 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001331
1332 return len;
1333}
1334
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001335/**
1336 * send_bitmap_rle_or_plain() - send one bitmap packet, RLE-compressed if that is smaller
1337 *
1338 * Return 0 when done, 1 when another iteration is needed, and a negative error
1339 * code upon failure.
1340 */
1341static int
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001342send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001343{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001344 struct drbd_socket *sock = &mdev->tconn->data;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001345 unsigned int header_size = drbd_header_size(mdev->tconn);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001346 struct p_compressed_bm *p = sock->sbuf + header_size;
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001347 int len, err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001348
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001349 len = fill_bitmap_rle_bits(mdev, p,
1350 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001351 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001352 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001353
1354 if (len) {
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001355 dcbp_set_code(p, RLE_VLI_Bits);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001356 err = __send_command(mdev->tconn, mdev->vnr, sock,
1357 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1358 NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001359 c->packets[0]++;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001360 c->bytes[0] += header_size + sizeof(*p) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001361
1362 if (c->bit_offset >= c->bm_bits)
1363 len = 0; /* DONE */
1364 } else {
1365 /* was not compressible.
1366 * send a buffer full of plain text bits instead. */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001367 unsigned int data_size;
1368 unsigned long num_words;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001369 unsigned long *p = sock->sbuf + header_size;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001370
1371 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001372 num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001373 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001374 len = num_words * sizeof(*p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001375 if (len)
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001376 drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
1377 err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001378 c->word_offset += num_words;
1379 c->bit_offset = c->word_offset * BITS_PER_LONG;
1380
1381 c->packets[1]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001382 c->bytes[1] += header_size + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001383
1384 if (c->bit_offset > c->bm_bits)
1385 c->bit_offset = c->bm_bits;
1386 }
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001387 if (!err) {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001388 if (len == 0) {
1389 INFO_bm_xfer_stats(mdev, "send", c);
1390 return 0;
1391 } else
1392 return 1;
1393 }
1394 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001395}
1396
1397/* See the comment at receive_bitmap() */
Andreas Gruenbacher058820c2011-03-22 16:03:43 +01001398static int _drbd_send_bitmap(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001399{
1400 struct bm_xfer_ctx c;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001401 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001402
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001403 if (!expect(mdev->bitmap))
1404 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001405
Philipp Reisnerb411b362009-09-25 16:07:19 -07001406 if (get_ldev(mdev)) {
1407 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1408 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
1409 drbd_bm_set_all(mdev);
1410 if (drbd_bm_write(mdev)) {
1411 /* write_bm failed! Leave the full sync flag set in the meta data,
1412 * but otherwise proceed as normal - we need to tell the other
1413 * side that a full resync is required! */
1414 dev_err(DEV, "Failed to write bitmap to disk!\n");
1415 } else {
1416 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
1417 drbd_md_sync(mdev);
1418 }
1419 }
1420 put_ldev(mdev);
1421 }
1422
1423 c = (struct bm_xfer_ctx) {
1424 .bm_bits = drbd_bm_bits(mdev),
1425 .bm_words = drbd_bm_words(mdev),
1426 };
1427
1428 do {
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001429 err = send_bitmap_rle_or_plain(mdev, &c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001430 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001431
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001432 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433}
1434
1435int drbd_send_bitmap(struct drbd_conf *mdev)
1436{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001437 struct drbd_socket *sock = &mdev->tconn->data;
1438 int err = -1;
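	/* inverted convention here: 0 on success, 1 if sending the bitmap
	 * failed, -1 if there was no socket to send on */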
Philipp Reisnerb411b362009-09-25 16:07:19 -07001439
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001440 mutex_lock(&sock->mutex);
1441 if (sock->socket)
1442 err = !_drbd_send_bitmap(mdev);
1443 mutex_unlock(&sock->mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001444 return err;
1445}
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001446
Andreas Gruenbacherd4e67d72011-03-16 01:25:28 +01001447void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001448{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001449 struct drbd_socket *sock;
1450 struct p_barrier_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001451
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001452 if (mdev->state.conn < C_CONNECTED)
1453 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001454
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001455 sock = &mdev->tconn->meta;
1456 p = drbd_prepare_command(mdev, sock);
1457 if (!p)
1458 return;
1459 p->barrier = barrier_nr;
1460 p->set_size = cpu_to_be32(set_size);
1461 drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001462}
1463
1464/**
1465 * _drbd_send_ack() - Sends an ack packet
1466 * @mdev: DRBD device.
1467 * @cmd: Packet command code.
1468 * @sector: sector, needs to be in big endian byte order
1469 * @blksize: size in bytes, needs to be in big endian byte order
1470 * @block_id: Id, big endian byte order
1471 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001472static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1473 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001474{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001475 struct drbd_socket *sock;
1476 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001477
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001478 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001479 return -EIO;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001480
1481 sock = &mdev->tconn->meta;
1482 p = drbd_prepare_command(mdev, sock);
1483 if (!p)
1484 return -EIO;
1485 p->sector = sector;
1486 p->block_id = block_id;
1487 p->blksize = blksize;
1488 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1489 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001490}
1491
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001492/* dp->sector and dp->block_id already/still in network byte order,
1493 * data_size is payload size according to dp->head,
1494 * and may need to be corrected for digest size. */
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001495void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1496 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001497{
Andreas Gruenbacher88104ca2011-04-28 21:47:21 +02001498 if (mdev->tconn->peer_integrity_tfm)
1499 data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001500 _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1501 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001502}
1503
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001504void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1505 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001506{
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001507 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001508}
1509
1510/**
1511 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001512 * @mdev: DRBD device
1513 * @cmd: packet command code
1514 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001516int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001517 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001518{
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001519 return _drbd_send_ack(mdev, cmd,
1520 cpu_to_be64(peer_req->i.sector),
1521 cpu_to_be32(peer_req->i.size),
1522 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001523}
1524
1525/* This function misuses the block_id field to signal if the blocks
1526 * are in sync or not. */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001527int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001528 sector_t sector, int blksize, u64 block_id)
1529{
Andreas Gruenbacherfa79abd2011-03-16 01:31:39 +01001530 return _drbd_send_ack(mdev, cmd,
1531 cpu_to_be64(sector),
1532 cpu_to_be32(blksize),
1533 cpu_to_be64(block_id));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001534}
1535
1536int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1537 sector_t sector, int size, u64 block_id)
1538{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001539 struct drbd_socket *sock;
1540 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001541
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001542 sock = &mdev->tconn->data;
1543 p = drbd_prepare_command(mdev, sock);
1544 if (!p)
1545 return -EIO;
1546 p->sector = cpu_to_be64(sector);
1547 p->block_id = block_id;
1548 p->blksize = cpu_to_be32(size);
1549 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001550}
1551
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001552int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1553 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001554{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001555 struct drbd_socket *sock;
1556 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001557
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001558 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001559
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001560 sock = &mdev->tconn->data;
1561 p = drbd_prepare_command(mdev, sock);
1562 if (!p)
1563 return -EIO;
1564 p->sector = cpu_to_be64(sector);
1565 p->block_id = ID_SYNCER /* unused */;
1566 p->blksize = cpu_to_be32(size);
1567 return drbd_send_command(mdev, sock, cmd, sizeof(*p),
1568 digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001569}
1570
1571int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1572{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001573 struct drbd_socket *sock;
1574 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001575
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001576 sock = &mdev->tconn->data;
1577 p = drbd_prepare_command(mdev, sock);
1578 if (!p)
1579 return -EIO;
1580 p->sector = cpu_to_be64(sector);
1581 p->block_id = ID_SYNCER /* unused */;
1582 p->blksize = cpu_to_be32(size);
1583 return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001584}
1585
1586/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001587 * returns false if we should retry,
1588 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001589 */
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001590static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001591{
1592 int drop_it;
1593 /* long elapsed = (long)(jiffies - mdev->last_received); */
1594
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001595 drop_it = tconn->meta.socket == sock
1596 || !tconn->asender.task
1597 || get_t_state(&tconn->asender) != RUNNING
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001598 || tconn->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001599
1600 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001601 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001602
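	/* ko_count acts as a "knock out" countdown: it is re-armed from
	 * net_conf on every drbd_send() on the data socket (see below),
	 * decremented on each send timeout, and only when it reaches zero
	 * do we give up on the peer; until then we just request a ping. */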
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001603 drop_it = !--tconn->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001604 if (!drop_it) {
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001605 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1606 current->comm, current->pid, tconn->ko_count);
1607 request_ping(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001608 }
1609
1610 return drop_it; /* && (mdev->state == R_PRIMARY) */
1611}
1612
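/* Consider the data socket congested once more than 4/5 of its send
 * buffer is queued; drbd_congested() further down reports the resulting
 * NET_CONGESTED bit to the VM as BDI_async_congested. */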
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001613static void drbd_update_congested(struct drbd_tconn *tconn)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001614{
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001615 struct sock *sk = tconn->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001616 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001617 set_bit(NET_CONGESTED, &tconn->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001618}
1619
Philipp Reisnerb411b362009-09-25 16:07:19 -07001620/* The idea of sendpage seems to be to put some kind of reference
1621 * to the page into the skb, and to hand it over to the NIC. In
1622 * this process get_page() gets called.
1623 *
1624 * As soon as the page was really sent over the network put_page()
1625 * gets called by some part of the network layer. [ NIC driver? ]
1626 *
1627 * [ get_page() / put_page() increment/decrement the count. If count
1628 * reaches 0 the page will be freed. ]
1629 *
1630 * This works nicely with pages from FSs.
1631 * But this means that in protocol A we might signal IO completion too early!
1632 *
1633 * In order not to corrupt data during a resync we must make sure
1634 * that we do not reuse our own buffer pages (EEs) too early, therefore
1635 * we have the net_ee list.
1636 *
1637 * XFS still seems to have problems: it submits pages with page_count == 0!
1638 * As a workaround, we disable sendpage on pages
1639 * with page_count == 0 or PageSlab.
1640 */
1641static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001642 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001643{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001644 struct socket *socket;
1645 void *addr;
1646 int err;
1647
1648 socket = mdev->tconn->data.socket;
1649 addr = kmap(page) + offset;
1650 err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001651 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001652 if (!err)
1653 mdev->send_cnt += size >> 9;
1654 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001655}
1656
1657static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001658 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001659{
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001660 struct socket *socket = mdev->tconn->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001661 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001662 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001663 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001664
1665 /* e.g. XFS meta- & log-data is in slab pages, which have a
1666 * page_count of 0 and/or have PageSlab() set.
1667 * we cannot use send_page for those, as that does get_page();
1668 * put_page(); and would cause either a VM_BUG directly, or
1669 * __page_cache_release a page that would actually still be referenced
1670 * by someone, leading to some obscure delayed Oops somewhere else. */
1671 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001672 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001673
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001674 msg_flags |= MSG_NOSIGNAL;
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001675 drbd_update_congested(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001676 set_fs(KERNEL_DS);
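	/* sendpage() may transfer only part of the requested range; loop,
	 * advancing offset, and treat -EAGAIN as a cue to check whether
	 * the peer looks dead before retrying. */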
1677 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001678 int sent;
1679
1680 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001681 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001682 if (sent == -EAGAIN) {
1683 if (we_should_drop_the_connection(mdev->tconn, socket))
1684 break;
1685 continue;
1686 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001687 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1688 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001689 if (sent < 0)
1690 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001691 break;
1692 }
1693 len -= sent;
1694 offset += sent;
1695 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
1696 set_fs(oldfs);
Philipp Reisner01a311a2011-02-07 14:30:33 +01001697 clear_bit(NET_CONGESTED, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001698
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001699 if (len == 0) {
1700 err = 0;
1701 mdev->send_cnt += size >> 9;
1702 }
1703 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001704}
1705
1706static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1707{
1708 struct bio_vec *bvec;
1709 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001710 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001711 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001712 int err;
1713
1714 err = _drbd_no_send_page(mdev, bvec->bv_page,
1715 bvec->bv_offset, bvec->bv_len,
1716 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1717 if (err)
1718 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001719 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001720 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001721}
1722
1723static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1724{
1725 struct bio_vec *bvec;
1726 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001727 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001728 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001729 int err;
1730
1731 err = _drbd_send_page(mdev, bvec->bv_page,
1732 bvec->bv_offset, bvec->bv_len,
1733 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1734 if (err)
1735 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001736 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001737 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001738}
1739
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001740static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1741 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001742{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001743 struct page *page = peer_req->pages;
1744 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001745 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001746
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001747 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001748 page_chain_for_each(page) {
1749 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001750
1751 err = _drbd_send_page(mdev, page, 0, l,
1752 page_chain_next(page) ? MSG_MORE : 0);
1753 if (err)
1754 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001755 len -= l;
1756 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001757 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001758}
1759
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001760static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1761{
Philipp Reisner31890f42011-01-19 14:12:51 +01001762 if (mdev->tconn->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001763 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001764 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1765 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1766 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1767 else
Jens Axboe721a9602011-03-09 11:56:30 +01001768 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001769}
1770
Philipp Reisnerb411b362009-09-25 16:07:19 -07001771/* Used to send write requests
1772 * R_PRIMARY -> Peer (P_DATA)
1773 */
1774int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1775{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001776 struct drbd_socket *sock;
1777 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001778 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001779 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001780 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001781
Philipp Reisner46e1ce42011-05-16 12:57:15 +02001782 sock = &mdev->tconn->data;
1783 p = drbd_prepare_command(mdev, sock);
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02001784 dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001785
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001786 if (!p)
1787 return -EIO;
1788 p->sector = cpu_to_be64(req->i.sector);
1789 p->block_id = (unsigned long)req;
1790 p->seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001791 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001792 if (mdev->state.conn >= C_SYNC_SOURCE &&
1793 mdev->state.conn <= C_PAUSED_SYNC_T)
1794 dp_flags |= DP_MAY_SET_IN_SYNC;
Philipp Reisner303d1442011-04-13 16:24:47 -07001795 if (mdev->tconn->agreed_pro_version >= 100) {
1796 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1797 dp_flags |= DP_SEND_RECEIVE_ACK;
1798 if (req->rq_state & RQ_EXP_WRITE_ACK)
1799 dp_flags |= DP_SEND_WRITE_ACK;
1800 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001801 p->dp_flags = cpu_to_be32(dp_flags);
1802 if (dgs)
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001803 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001804 err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001805 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001806 /* For protocol A, we have to memcpy the payload into
1807 * socket buffers, as we may complete right away
1808 * as soon as we handed it over to tcp, at which point the data
1809 * pages may become invalid.
1810 *
1811 * For data-integrity enabled, we copy it as well, so we can be
1812 * sure that even if the bio pages may still be modified, it
1813 * won't change the data on the wire, thus if the digest checks
1814 * out ok after sending on this side, but does not fit on the
1815 * receiving side, we sure have detected corruption elsewhere.
1816 */
Philipp Reisner303d1442011-04-13 16:24:47 -07001817 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001818 err = _drbd_send_bio(mdev, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001819 else
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001820 err = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001821
1822 /* double check digest, sometimes buffers have been modified in flight. */
1823 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001824 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001825 * currently supported in kernel crypto. */
1826 unsigned char digest[64];
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001827 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001828 if (memcmp(p + 1, digest, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001829 dev_warn(DEV,
1830 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001831 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001832 }
1833 } /* else if (dgs > 64) {
1834 ... Be noisy about digest too large ...
1835 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001836 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001837 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001838
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001839 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001840}
1841
1842/* answer packet, used to send data back for read requests:
1843 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1844 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1845 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001846int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001847 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001848{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001849 struct drbd_socket *sock;
1850 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001851 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001852 int dgs;
1853
Philipp Reisner46e1ce42011-05-16 12:57:15 +02001854 sock = &mdev->tconn->data;
1855 p = drbd_prepare_command(mdev, sock);
1856
Andreas Gruenbacher7d4c7822011-07-17 23:06:12 +02001857 dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001858
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001859 if (!p)
1860 return -EIO;
1861 p->sector = cpu_to_be64(peer_req->i.sector);
1862 p->block_id = peer_req->block_id;
1863 p->seq_num = 0; /* unused */
1864 if (dgs)
Andreas Gruenbacher8d412fc2011-04-27 20:59:18 +02001865 drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001866 err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001867 if (!err)
1868 err = _drbd_send_zc_ee(mdev, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001869 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001870
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001871 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001872}
1873
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01001874int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001875{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001876 struct drbd_socket *sock;
1877 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001878
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001879 sock = &mdev->tconn->data;
1880 p = drbd_prepare_command(mdev, sock);
1881 if (!p)
1882 return -EIO;
1883 p->sector = cpu_to_be64(req->i.sector);
1884 p->blksize = cpu_to_be32(req->i.size);
1885 return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001886}
1887
Philipp Reisnerb411b362009-09-25 16:07:19 -07001888/*
1889 drbd_send distinguishes two cases:
1890
1891 Packets sent via the data socket "sock"
1892 and packets sent via the meta data socket "msock"
1893
1894                     sock                      msock
1895  -----------------+-------------------------+------------------------------
1896  timeout           conf.timeout / 2          conf.timeout / 2
1897  timeout action    send a ping via msock     Abort communication
1898                                              and close all sockets
1899*/
1900
1901/*
1902 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1903 */
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001904int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001905 void *buf, size_t size, unsigned msg_flags)
1906{
1907 struct kvec iov;
1908 struct msghdr msg;
1909 int rv, sent = 0;
1910
1911 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001912 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001913
1914 /* THINK if (signal_pending) return ... ? */
1915
1916 iov.iov_base = buf;
1917 iov.iov_len = size;
1918
1919 msg.msg_name = NULL;
1920 msg.msg_namelen = 0;
1921 msg.msg_control = NULL;
1922 msg.msg_controllen = 0;
1923 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1924
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001925 if (sock == tconn->data.socket) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001926 rcu_read_lock();
1927 tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
1928 rcu_read_unlock();
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001929 drbd_update_congested(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001930 }
1931 do {
1932 /* STRANGE
1933 * tcp_sendmsg does _not_ use its size parameter at all?
1934 *
1935 * -EAGAIN on timeout, -EINTR on signal.
1936 */
1937/* THINK
1938 * do we need to block DRBD_SIG if sock == &meta.socket ??
1939 * otherwise wake_asender() might interrupt some send_*Ack !
1940 */
1941 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1942 if (rv == -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001943 if (we_should_drop_the_connection(tconn, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001944 break;
1945 else
1946 continue;
1947 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001948 if (rv == -EINTR) {
1949 flush_signals(current);
1950 rv = 0;
1951 }
1952 if (rv < 0)
1953 break;
1954 sent += rv;
1955 iov.iov_base += rv;
1956 iov.iov_len -= rv;
1957 } while (sent < size);
1958
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001959 if (sock == tconn->data.socket)
1960 clear_bit(NET_CONGESTED, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001961
1962 if (rv <= 0) {
1963 if (rv != -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001964 conn_err(tconn, "%s_sendmsg returned %d\n",
1965 sock == tconn->meta.socket ? "msock" : "sock",
1966 rv);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001967 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001968 } else
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001969 conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001970 }
1971
1972 return sent;
1973}
1974
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001975/**
1976 * drbd_send_all - Send an entire buffer
1977 *
1978 * Returns 0 upon success and a negative error value otherwise.
1979 */
1980int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1981 size_t size, unsigned msg_flags)
1982{
1983 int err;
1984
1985 err = drbd_send(tconn, sock, buffer, size, msg_flags);
1986 if (err < 0)
1987 return err;
1988 if (err != size)
1989 return -EIO;
1990 return 0;
1991}
1992
Philipp Reisnerb411b362009-09-25 16:07:19 -07001993static int drbd_open(struct block_device *bdev, fmode_t mode)
1994{
1995 struct drbd_conf *mdev = bdev->bd_disk->private_data;
1996 unsigned long flags;
1997 int rv = 0;
1998
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001999 mutex_lock(&drbd_main_mutex);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002000 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002001 /* to have a stable mdev->state.role
2002 * and no race with updating open_cnt */
2003
2004 if (mdev->state.role != R_PRIMARY) {
2005 if (mode & FMODE_WRITE)
2006 rv = -EROFS;
2007 else if (!allow_oos)
2008 rv = -EMEDIUMTYPE;
2009 }
2010
2011 if (!rv)
2012 mdev->open_cnt++;
Philipp Reisner87eeee42011-01-19 14:16:30 +01002013 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002014 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002015
2016 return rv;
2017}
2018
2019static int drbd_release(struct gendisk *gd, fmode_t mode)
2020{
2021 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002022 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002023 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02002024 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002025 return 0;
2026}
2027
Philipp Reisnerb411b362009-09-25 16:07:19 -07002028static void drbd_set_defaults(struct drbd_conf *mdev)
2029{
Lars Ellenbergf3990022011-03-23 14:31:09 +01002030 /* Beware! The actual layout differs
2031 * between big endian and little endian */
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02002032 mdev->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002033 { .role = R_SECONDARY,
2034 .peer = R_UNKNOWN,
2035 .conn = C_STANDALONE,
2036 .disk = D_DISKLESS,
2037 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038 } };
2039}
2040
2041void drbd_init_set_defaults(struct drbd_conf *mdev)
2042{
2043 /* the memset(,0,) did most of this.
2044 * note: only assignments, no allocation in here */
2045
2046 drbd_set_defaults(mdev);
2047
Philipp Reisnerb411b362009-09-25 16:07:19 -07002048 atomic_set(&mdev->ap_bio_cnt, 0);
2049 atomic_set(&mdev->ap_pending_cnt, 0);
2050 atomic_set(&mdev->rs_pending_cnt, 0);
2051 atomic_set(&mdev->unacked_cnt, 0);
2052 atomic_set(&mdev->local_cnt, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02002053 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02002054 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002055 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02002056 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnercdfda632011-07-05 15:38:59 +02002057 atomic_set(&mdev->md_io_in_use, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002058
Philipp Reisner8410da8f02011-02-11 20:11:10 +01002059 mutex_init(&mdev->own_state_mutex);
2060 mdev->state_mutex = &mdev->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002061
Philipp Reisnerb411b362009-09-25 16:07:19 -07002062 spin_lock_init(&mdev->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002063 spin_lock_init(&mdev->peer_seq_lock);
2064 spin_lock_init(&mdev->epoch_lock);
2065
2066 INIT_LIST_HEAD(&mdev->active_ee);
2067 INIT_LIST_HEAD(&mdev->sync_ee);
2068 INIT_LIST_HEAD(&mdev->done_ee);
2069 INIT_LIST_HEAD(&mdev->read_ee);
2070 INIT_LIST_HEAD(&mdev->net_ee);
2071 INIT_LIST_HEAD(&mdev->resync_reads);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002072 INIT_LIST_HEAD(&mdev->resync_work.list);
2073 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002074 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002075 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02002076 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002077 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02002078
Philipp Reisner794abb72010-12-27 11:51:23 +01002079 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002080 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002081 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002082 mdev->md_sync_work.cb = w_md_sync;
2083 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01002084 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01002085
2086 mdev->resync_work.mdev = mdev;
2087 mdev->unplug_work.mdev = mdev;
2088 mdev->go_diskless.mdev = mdev;
2089 mdev->md_sync_work.mdev = mdev;
2090 mdev->bm_io_work.w.mdev = mdev;
2091 mdev->start_resync_work.mdev = mdev;
2092
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093 init_timer(&mdev->resync_timer);
2094 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01002095 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01002096 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002097 mdev->resync_timer.function = resync_timer_fn;
2098 mdev->resync_timer.data = (unsigned long) mdev;
2099 mdev->md_sync_timer.function = md_sync_timer_fn;
2100 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01002101 mdev->start_resync_timer.function = start_resync_timer_fn;
2102 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01002103 mdev->request_timer.function = request_timer_fn;
2104 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002105
2106 init_waitqueue_head(&mdev->misc_wait);
2107 init_waitqueue_head(&mdev->state_wait);
2108 init_waitqueue_head(&mdev->ee_wait);
2109 init_waitqueue_head(&mdev->al_wait);
2110 init_waitqueue_head(&mdev->seq_wait);
2111
Philipp Reisner2451fc32010-08-24 13:43:11 +02002112 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002113 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02002114 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2115 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002116}
2117
2118void drbd_mdev_cleanup(struct drbd_conf *mdev)
2119{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02002120 int i;
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01002121 if (mdev->tconn->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002122 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01002123 mdev->tconn->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002124
2125 /* no need to lock it, I'm the only thread alive */
2126 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
2127 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2128 mdev->al_writ_cnt =
2129 mdev->bm_writ_cnt =
2130 mdev->read_cnt =
2131 mdev->recv_cnt =
2132 mdev->send_cnt =
2133 mdev->writ_cnt =
2134 mdev->p_size =
2135 mdev->rs_start =
2136 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02002137 mdev->rs_failed = 0;
2138 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02002139 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02002140 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2141 mdev->rs_mark_left[i] = 0;
2142 mdev->rs_mark_time[i] = 0;
2143 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01002144 D_ASSERT(mdev->tconn->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002145
2146 drbd_set_my_capacity(mdev, 0);
2147 if (mdev->bitmap) {
2148 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01002149 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002150 drbd_bm_cleanup(mdev);
2151 }
2152
Philipp Reisner1d041222011-04-22 15:20:23 +02002153 drbd_free_bc(mdev->ldev);
2154 mdev->ldev = NULL;
2155
Philipp Reisner07782862010-08-31 12:00:50 +02002156 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002157
Philipp Reisnerb411b362009-09-25 16:07:19 -07002158 D_ASSERT(list_empty(&mdev->active_ee));
2159 D_ASSERT(list_empty(&mdev->sync_ee));
2160 D_ASSERT(list_empty(&mdev->done_ee));
2161 D_ASSERT(list_empty(&mdev->read_ee));
2162 D_ASSERT(list_empty(&mdev->net_ee));
2163 D_ASSERT(list_empty(&mdev->resync_reads));
Philipp Reisnere42325a2011-01-19 13:55:45 +01002164 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2165 D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002166 D_ASSERT(list_empty(&mdev->resync_work.list));
2167 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002168 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01002169
2170 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002171}
2172
2173
2174static void drbd_destroy_mempools(void)
2175{
2176 struct page *page;
2177
2178 while (drbd_pp_pool) {
2179 page = drbd_pp_pool;
2180 drbd_pp_pool = (struct page *)page_private(page);
2181 __free_page(page);
2182 drbd_pp_vacant--;
2183 }
2184
2185 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2186
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002187 if (drbd_md_io_bio_set)
2188 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg35abf592011-02-23 12:39:46 +01002189 if (drbd_md_io_page_pool)
2190 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002191 if (drbd_ee_mempool)
2192 mempool_destroy(drbd_ee_mempool);
2193 if (drbd_request_mempool)
2194 mempool_destroy(drbd_request_mempool);
2195 if (drbd_ee_cache)
2196 kmem_cache_destroy(drbd_ee_cache);
2197 if (drbd_request_cache)
2198 kmem_cache_destroy(drbd_request_cache);
2199 if (drbd_bm_ext_cache)
2200 kmem_cache_destroy(drbd_bm_ext_cache);
2201 if (drbd_al_ext_cache)
2202 kmem_cache_destroy(drbd_al_ext_cache);
2203
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002204 drbd_md_io_bio_set = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002205 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002206 drbd_ee_mempool = NULL;
2207 drbd_request_mempool = NULL;
2208 drbd_ee_cache = NULL;
2209 drbd_request_cache = NULL;
2210 drbd_bm_ext_cache = NULL;
2211 drbd_al_ext_cache = NULL;
2212
2213 return;
2214}
2215
2216static int drbd_create_mempools(void)
2217{
2218 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002219 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
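	/* i.e. enough pages to hold one maximally sized BIO per allowed
	 * minor; this sizes drbd's page pool and the mempools below */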
Philipp Reisnerb411b362009-09-25 16:07:19 -07002220 int i;
2221
2222 /* prepare our caches and mempools */
2223 drbd_request_mempool = NULL;
2224 drbd_ee_cache = NULL;
2225 drbd_request_cache = NULL;
2226 drbd_bm_ext_cache = NULL;
2227 drbd_al_ext_cache = NULL;
2228 drbd_pp_pool = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002229 drbd_md_io_page_pool = NULL;
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002230 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002231
2232 /* caches */
2233 drbd_request_cache = kmem_cache_create(
2234 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2235 if (drbd_request_cache == NULL)
2236 goto Enomem;
2237
2238 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002239 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002240 if (drbd_ee_cache == NULL)
2241 goto Enomem;
2242
2243 drbd_bm_ext_cache = kmem_cache_create(
2244 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2245 if (drbd_bm_ext_cache == NULL)
2246 goto Enomem;
2247
2248 drbd_al_ext_cache = kmem_cache_create(
2249 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2250 if (drbd_al_ext_cache == NULL)
2251 goto Enomem;
2252
2253 /* mempools */
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002254 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2255 if (drbd_md_io_bio_set == NULL)
2256 goto Enomem;
2257
Lars Ellenberg35abf592011-02-23 12:39:46 +01002258 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2259 if (drbd_md_io_page_pool == NULL)
2260 goto Enomem;
2261
Philipp Reisnerb411b362009-09-25 16:07:19 -07002262 drbd_request_mempool = mempool_create(number,
2263 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2264 if (drbd_request_mempool == NULL)
2265 goto Enomem;
2266
2267 drbd_ee_mempool = mempool_create(number,
2268 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002269 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002270 goto Enomem;
2271
2272 /* drbd's page pool */
2273 spin_lock_init(&drbd_pp_lock);
2274
2275 for (i = 0; i < number; i++) {
2276 page = alloc_page(GFP_HIGHUSER);
2277 if (!page)
2278 goto Enomem;
2279 set_page_private(page, (unsigned long)drbd_pp_pool);
2280 drbd_pp_pool = page;
2281 }
2282 drbd_pp_vacant = number;
2283
2284 return 0;
2285
2286Enomem:
2287 drbd_destroy_mempools(); /* in case we allocated some */
2288 return -ENOMEM;
2289}
2290
2291static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2292 void *unused)
2293{
2294 /* just so we have it. you never know what interesting things we
2295 * might want to do here some day...
2296 */
2297
2298 return NOTIFY_DONE;
2299}
2300
2301static struct notifier_block drbd_notifier = {
2302 .notifier_call = drbd_notify_sys,
2303};
2304
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002305static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002306{
2307 int rr;
2308
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002309 rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002310 if (rr)
2311 dev_err(DEV, "%d EEs in active list found!\n", rr);
2312
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002313 rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002314 if (rr)
2315 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2316
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002317 rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002318 if (rr)
2319 dev_err(DEV, "%d EEs in read list found!\n", rr);
2320
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002321 rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002322 if (rr)
2323 dev_err(DEV, "%d EEs in done list found!\n", rr);
2324
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002325 rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002326 if (rr)
2327 dev_err(DEV, "%d EEs in net list found!\n", rr);
2328}
2329
Philipp Reisner774b3052011-02-22 02:07:03 -05002330/* caution. no locking. */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002331void drbd_minor_destroy(struct kref *kref)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002332{
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002333 struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002334 struct drbd_tconn *tconn = mdev->tconn;
2335
Philipp Reisnercdfda632011-07-05 15:38:59 +02002336 del_timer_sync(&mdev->request_timer);
2337
Philipp Reisnerb411b362009-09-25 16:07:19 -07002338 /* paranoia asserts */
Andreas Gruenbacher70dc65e2010-12-21 14:46:57 +01002339 D_ASSERT(mdev->open_cnt == 0);
Philipp Reisnere42325a2011-01-19 13:55:45 +01002340 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002341 /* end paranoia asserts */
2342
Philipp Reisnerb411b362009-09-25 16:07:19 -07002343 /* cleanup stuff that may have been allocated during
2344 * device (re-)configuration or state changes */
2345
2346 if (mdev->this_bdev)
2347 bdput(mdev->this_bdev);
2348
Philipp Reisner1d041222011-04-22 15:20:23 +02002349 drbd_free_bc(mdev->ldev);
2350 mdev->ldev = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002351
Andreas Gruenbacher7721f562011-04-06 17:14:02 +02002352 drbd_release_all_peer_reqs(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002353
Philipp Reisnerb411b362009-09-25 16:07:19 -07002354 lc_destroy(mdev->act_log);
2355 lc_destroy(mdev->resync);
2356
2357 kfree(mdev->p_uuid);
2358 /* mdev->p_uuid = NULL; */
2359
Philipp Reisnercd1d9952011-04-11 21:24:24 -07002360 kfree(mdev->current_epoch);
2361 if (mdev->bitmap) /* should no longer be there. */
2362 drbd_bm_cleanup(mdev);
2363 __free_page(mdev->md_io_page);
2364 put_disk(mdev->vdisk);
2365 blk_cleanup_queue(mdev->rq_queue);
Philipp Reisner9958c852011-05-03 16:19:31 +02002366 kfree(mdev->rs_plan_s);
Philipp Reisnercd1d9952011-04-11 21:24:24 -07002367 kfree(mdev);
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002368
2369 kref_put(&tconn->kref, &conn_destroy);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370}
2371
2372static void drbd_cleanup(void)
2373{
2374 unsigned int i;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002375 struct drbd_conf *mdev;
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002376 struct drbd_tconn *tconn, *tmp;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002377
2378 unregister_reboot_notifier(&drbd_notifier);
2379
Lars Ellenberg17a93f32010-11-24 10:37:35 +01002380 /* first remove proc,
2381 * drbdsetup uses its presence to detect
2382 * whether DRBD is loaded.
2383 * If we would get stuck in proc removal,
2384 * but have netlink already deregistered,
2385 * some drbdsetup commands may wait forever
2386 * for an answer.
2387 */
2388 if (drbd_proc)
2389 remove_proc_entry("drbd", NULL);
2390
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002391 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002392
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002393 idr_for_each_entry(&minors, mdev, i) {
2394 idr_remove(&minors, mdev_to_minor(mdev));
2395 idr_remove(&mdev->tconn->volumes, mdev->vnr);
2396 del_gendisk(mdev->vdisk);
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002397 /* synchronize_rcu(); No other threads running at this point */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002398 kref_put(&mdev->kref, &drbd_minor_destroy);
2399 }
2400
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002401 /* not _rcu since, no other updater anymore. Genl already unregistered */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002402 list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002403 list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
2404 /* synchronize_rcu(); */
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002405 kref_put(&tconn->kref, &conn_destroy);
2406 }
Philipp Reisnerff370e52011-04-11 21:10:11 -07002407
Philipp Reisner81a5d602011-02-22 19:53:16 -05002408 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002409 unregister_blkdev(DRBD_MAJOR, "drbd");
2410
Philipp Reisner81a5d602011-02-22 19:53:16 -05002411 idr_destroy(&minors);
2412
Philipp Reisnerb411b362009-09-25 16:07:19 -07002413 printk(KERN_INFO "drbd: module cleanup done.\n");
2414}

/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:     User data
 * @bdi_bits:           Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
        struct drbd_conf *mdev = congested_data;
        struct request_queue *q;
        char reason = '-';
        int r = 0;

        if (!may_inc_ap_bio(mdev)) {
                /* DRBD has frozen IO */
                r = bdi_bits;
                reason = 'd';
                goto out;
        }

        if (get_ldev(mdev)) {
                q = bdev_get_queue(mdev->ldev->backing_bdev);
                r = bdi_congested(&q->backing_dev_info, bdi_bits);
                put_ldev(mdev);
                if (r)
                        reason = 'b';
        }

        if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
                r |= (1 << BDI_async_congested);
                reason = reason == 'b' ? 'a' : 'n';
        }

out:
        mdev->congestion_reason = reason;
        return r;
}
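
/*
 * Illustrative sketch, not part of the driver: how a congested_fn like
 * drbd_congested() gets wired to a request queue's backing_dev_info.
 * conn_new_minor() below does exactly this for the real device; the
 * helper name here is hypothetical.
 */
static void __maybe_unused example_wire_congested_fn(struct request_queue *q,
                                                     struct drbd_conf *mdev)
{
        /* pdflush calls back with the bdi bits it is interested in */
        q->backing_dev_info.congested_fn = drbd_congested;
        q->backing_dev_info.congested_data = mdev;
}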

static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
        sema_init(&wq->s, 0);
        spin_lock_init(&wq->q_lock);
        INIT_LIST_HEAD(&wq->q);
}

struct drbd_tconn *conn_get_by_name(const char *name)
{
        struct drbd_tconn *tconn;

        if (!name || !name[0])
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
                if (!strcmp(tconn->name, name)) {
                        kref_get(&tconn->kref);
                        goto found;
                }
        }
        tconn = NULL;
found:
        rcu_read_unlock();
        return tconn;
}

struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
                                     void *peer_addr, int peer_addr_len)
{
        struct drbd_tconn *tconn;

        rcu_read_lock();
        list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
                if (tconn->my_addr_len == my_addr_len &&
                    tconn->peer_addr_len == peer_addr_len &&
                    !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
                    !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
                        kref_get(&tconn->kref);
                        goto found;
                }
        }
        tconn = NULL;
found:
        rcu_read_unlock();
        return tconn;
}

static int drbd_alloc_socket(struct drbd_socket *socket)
{
        socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
        if (!socket->rbuf)
                return -ENOMEM;
        socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
        if (!socket->sbuf)
                return -ENOMEM;
        return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
        free_page((unsigned long) socket->sbuf);
        free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_tconn *tconn)
{
        drbd_free_sock(tconn);

        crypto_free_hash(tconn->csums_tfm);
        crypto_free_hash(tconn->verify_tfm);
        crypto_free_hash(tconn->cram_hmac_tfm);
        crypto_free_hash(tconn->integrity_tfm);
        crypto_free_hash(tconn->peer_integrity_tfm);
        kfree(tconn->int_dig_in);
        kfree(tconn->int_dig_vv);

        tconn->csums_tfm = NULL;
        tconn->verify_tfm = NULL;
        tconn->cram_hmac_tfm = NULL;
        tconn->integrity_tfm = NULL;
        tconn->peer_integrity_tfm = NULL;
        tconn->int_dig_in = NULL;
        tconn->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
{
        cpumask_var_t new_cpu_mask;
        int err;

        if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
                return -ENOMEM;
        /*
        retcode = ERR_NOMEM;
        drbd_msg_put_info("unable to allocate cpumask");
        */

        /* silently ignore cpu mask on UP kernel */
        if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
                /* FIXME: Get rid of constant 32 here */
                err = __bitmap_parse(res_opts->cpu_mask, 32, 0,
                                     cpumask_bits(new_cpu_mask), nr_cpu_ids);
                if (err) {
                        conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
                        /* retcode = ERR_CPU_MASK_PARSE; */
                        goto fail;
                }
        }
        tconn->res_opts = *res_opts;
        if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
                cpumask_copy(tconn->cpu_mask, new_cpu_mask);
                drbd_calc_cpu_mask(tconn);
                tconn->receiver.reset_cpu_mask = 1;
                tconn->asender.reset_cpu_mask = 1;
                tconn->worker.reset_cpu_mask = 1;
        }
        err = 0;

fail:
        free_cpumask_var(new_cpu_mask);
        return err;
}
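
/*
 * Illustrative sketch, not part of the driver: what the cpu_mask
 * handling above boils down to.  res_opts->cpu_mask is a hex string
 * (e.g. "3" selects CPUs 0 and 1); __bitmap_parse() turns it into a
 * cpumask.  The helper name and the reuse of the FIXME'd constant 32
 * are assumptions for the example.
 */
static int __maybe_unused example_parse_cpu_mask(const char *mask_str,
                                                 cpumask_var_t mask)
{
        return __bitmap_parse(mask_str, 32, 0, cpumask_bits(mask), nr_cpu_ids);
}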

/* caller must be under genl_lock() */
struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
{
        struct drbd_tconn *tconn;

        tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
        if (!tconn)
                return NULL;

        tconn->name = kstrdup(name, GFP_KERNEL);
        if (!tconn->name)
                goto fail;

        if (drbd_alloc_socket(&tconn->data))
                goto fail;
        if (drbd_alloc_socket(&tconn->meta))
                goto fail;

        if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
                goto fail;

        if (set_resource_options(tconn, res_opts))
                goto fail;

        if (!tl_init(tconn))
                goto fail;

        tconn->cstate = C_STANDALONE;
        mutex_init(&tconn->cstate_mutex);
        spin_lock_init(&tconn->req_lock);
        mutex_init(&tconn->conf_update);
        init_waitqueue_head(&tconn->ping_wait);
        idr_init(&tconn->volumes);

        drbd_init_workqueue(&tconn->data.work);
        mutex_init(&tconn->data.mutex);

        drbd_init_workqueue(&tconn->meta.work);
        mutex_init(&tconn->meta.mutex);

        drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
        drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
        drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");

        kref_init(&tconn->kref);
        list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);

        return tconn;

fail:
        tl_cleanup(tconn);
        free_cpumask_var(tconn->cpu_mask);
        drbd_free_socket(&tconn->meta);
        drbd_free_socket(&tconn->data);
        kfree(tconn->name);
        kfree(tconn);

        return NULL;
}
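
/*
 * Illustrative sketch, not part of the driver: the intended life cycle
 * of a connection object.  conn_create() (called under genl_lock())
 * returns a tconn holding the single reference taken by kref_init()
 * above; dropping the last reference invokes conn_destroy().  The
 * resource name and res_opts are assumed to come from the caller.
 */
static void __maybe_unused example_conn_lifecycle(struct res_opts *res_opts)
{
        struct drbd_tconn *tconn = conn_create("r0", res_opts);

        if (!tconn)
                return;
        /* ... add volumes, start threads, serve IO ... */
        kref_put(&tconn->kref, &conn_destroy);
}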

void conn_destroy(struct kref *kref)
{
        struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);

        idr_destroy(&tconn->volumes);

        free_cpumask_var(tconn->cpu_mask);
        drbd_free_socket(&tconn->meta);
        drbd_free_socket(&tconn->data);
        kfree(tconn->name);
        kfree(tconn->int_dig_in);
        kfree(tconn->int_dig_vv);
        kfree(tconn);
}

enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
        struct drbd_conf *mdev;
        struct gendisk *disk;
        struct request_queue *q;
        int vnr_got = vnr;
        int minor_got = minor;
        enum drbd_ret_code err = ERR_NOMEM;

        mdev = minor_to_mdev(minor);
        if (mdev)
                return ERR_MINOR_EXISTS;

        /* GFP_KERNEL, we are outside of all write-out paths */
        mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
        if (!mdev)
                return ERR_NOMEM;

        kref_get(&tconn->kref);
        mdev->tconn = tconn;

        mdev->minor = minor;
        mdev->vnr = vnr;

        drbd_init_set_defaults(mdev);

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                goto out_no_q;
        mdev->rq_queue = q;
        q->queuedata = mdev;

        disk = alloc_disk(1);
        if (!disk)
                goto out_no_disk;
        mdev->vdisk = disk;

        set_disk_ro(disk, true);

        disk->queue = q;
        disk->major = DRBD_MAJOR;
        disk->first_minor = minor;
        disk->fops = &drbd_ops;
        sprintf(disk->disk_name, "drbd%d", minor);
        disk->private_data = mdev;

        mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
        /* we have no partitions. we contain only ourselves. */
        mdev->this_bdev->bd_contains = mdev->this_bdev;

        q->backing_dev_info.congested_fn = drbd_congested;
        q->backing_dev_info.congested_data = mdev;

        blk_queue_make_request(q, drbd_make_request);
        /* Setting max_hw_sectors to a deliberately odd value of 8 KiB here
         * triggers a max_bio_size message upon first attach or connect. */
        blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_merge_bvec(q, drbd_merge_bvec);
        q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */

        mdev->md_io_page = alloc_page(GFP_KERNEL);
        if (!mdev->md_io_page)
                goto out_no_io_page;

        if (drbd_bm_init(mdev))
                goto out_no_bitmap;
        mdev->read_requests = RB_ROOT;
        mdev->write_requests = RB_ROOT;

        mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
        if (!mdev->current_epoch)
                goto out_no_epoch;

        INIT_LIST_HEAD(&mdev->current_epoch->list);
        mdev->epochs = 1;

        if (!idr_pre_get(&minors, GFP_KERNEL))
                goto out_no_minor_idr;
        if (idr_get_new_above(&minors, mdev, minor, &minor_got))
                goto out_no_minor_idr;
        if (minor_got != minor) {
                err = ERR_MINOR_EXISTS;
                drbd_msg_put_info("requested minor exists already");
                goto out_idr_remove_minor;
        }

        if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
                goto out_idr_remove_minor;
        if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
                goto out_idr_remove_minor;
        if (vnr_got != vnr) {
                err = ERR_INVALID_REQUEST;
                drbd_msg_put_info("requested volume exists already");
                goto out_idr_remove_vol;
        }
        add_disk(disk);
        kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */

        /* inherit the connection state */
        mdev->state.conn = tconn->cstate;
        if (mdev->state.conn == C_WF_REPORT_PARAMS)
                drbd_connected(mdev);

        return NO_ERROR;

out_idr_remove_vol:
        idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
        idr_remove(&minors, minor_got);
        synchronize_rcu();
out_no_minor_idr:
        kfree(mdev->current_epoch);
out_no_epoch:
        drbd_bm_cleanup(mdev);
out_no_bitmap:
        __free_page(mdev->md_io_page);
out_no_io_page:
        put_disk(disk);
out_no_disk:
        blk_cleanup_queue(q);
out_no_q:
        kfree(mdev);
        kref_put(&tconn->kref, &conn_destroy);
        return err;
}
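
/*
 * Illustrative sketch, not part of the driver: the two-step idr
 * allocation pattern used twice in conn_new_minor() above.
 * idr_pre_get() preallocates memory, idr_get_new_above() inserts; if
 * the id handed back differs from the requested one, the slot was
 * already taken and we roll back.  Helper name and error codes are
 * assumptions for the example.
 */
static int __maybe_unused example_idr_insert(struct idr *idr, void *ptr, int want)
{
        int got;

        if (!idr_pre_get(idr, GFP_KERNEL))
                return -ENOMEM;
        if (idr_get_new_above(idr, ptr, want, &got))
                return -ENOMEM;
        if (got != want) {
                idr_remove(idr, got);
                return -EBUSY;
        }
        return 0;
}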

int __init drbd_init(void)
{
        int err;

        if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
                printk(KERN_ERR
                       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
                return -EINVAL;
#else
                minor_count = DRBD_MINOR_COUNT_DEF;
#endif
        }

        err = register_blkdev(DRBD_MAJOR, "drbd");
        if (err) {
                printk(KERN_ERR
                       "drbd: unable to register block device major %d\n",
                       DRBD_MAJOR);
                return err;
        }

        err = drbd_genl_register();
        if (err) {
                printk(KERN_ERR "drbd: unable to register generic netlink family\n");
                goto fail;
        }

        register_reboot_notifier(&drbd_notifier);

        /*
         * allocate all necessary structs
         */
        err = -ENOMEM;

        init_waitqueue_head(&drbd_pp_wait);

        drbd_proc = NULL; /* play safe for drbd_cleanup */
        idr_init(&minors);

        err = drbd_create_mempools();
        if (err)
                goto fail;

        drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
        if (!drbd_proc) {
                printk(KERN_ERR "drbd: unable to register proc file\n");
                goto fail;
        }

        rwlock_init(&global_state_lock);
        INIT_LIST_HEAD(&drbd_tconns);

        printk(KERN_INFO "drbd: initialized. "
               "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
               API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
        printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
        printk(KERN_INFO "drbd: registered as block device major %d\n",
               DRBD_MAJOR);

        return 0; /* Success! */

fail:
        drbd_cleanup();
        if (err == -ENOMEM)
                /* currently always the case */
                printk(KERN_ERR "drbd: ran out of memory\n");
        else
                printk(KERN_ERR "drbd: initialization failure\n");
        return err;
}

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
        if (ldev == NULL)
                return;

        blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

        kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
        if (tconn->data.socket) {
                mutex_lock(&tconn->data.mutex);
                kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
                sock_release(tconn->data.socket);
                tconn->data.socket = NULL;
                mutex_unlock(&tconn->data.mutex);
        }
        if (tconn->meta.socket) {
                mutex_lock(&tconn->meta.mutex);
                kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
                sock_release(tconn->meta.socket);
                tconn->meta.socket = NULL;
                mutex_unlock(&tconn->meta.mutex);
        }
}

/* meta data management */

struct meta_data_on_disk {
        u64 la_size;           /* last agreed size. */
        u64 uuid[UI_SIZE];     /* UUIDs. */
        u64 device_uuid;
        u64 reserved_u64_1;
        u32 flags;             /* MDF */
        u32 magic;
        u32 md_size_sect;
        u32 al_offset;         /* offset to this block */
        u32 al_nr_extents;     /* important for restoring the AL */
                               /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
        u32 bm_offset;         /* offset to the bitmap, from here */
        u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
        u32 la_peer_max_bio_size;   /* last peer max_bio_size */
        u32 reserved_u32[3];
} __packed;
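
/*
 * Illustrative sketch, not part of the driver: every field of struct
 * meta_data_on_disk is stored big endian, so each store pairs a
 * cpu_to_beXX() with a beXX_to_cpu() on the read side, exactly as
 * drbd_md_sync()/drbd_md_read() below do.  The helper is hypothetical.
 */
static void __maybe_unused example_md_endianness(struct meta_data_on_disk *buffer,
                                                 u32 flags)
{
        buffer->flags = cpu_to_be32(flags);            /* to disk format */
        WARN_ON(be32_to_cpu(buffer->flags) != flags);  /* and back */
}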

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:       DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
        struct meta_data_on_disk *buffer;
        sector_t sector;
        int i;

        del_timer(&mdev->md_sync_timer);
        /* timer may be rearmed by drbd_md_mark_dirty() now. */
        if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
                return;

        /* We use here D_FAILED and not D_ATTACHING because we try to write
         * metadata even if we detach due to a disk failure! */
        if (!get_ldev_if_state(mdev, D_FAILED))
                return;

        buffer = drbd_md_get_buffer(mdev);
        if (!buffer)
                goto out;

        memset(buffer, 0, 512);

        buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
        buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
        buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

        buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
        buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
        buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
        buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
        buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

        buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
        buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

        D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
        sector = mdev->ldev->md.md_offset;

        if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                /* this was a try anyways ... */
                dev_err(DEV, "meta data update failed!\n");
                drbd_chk_io_error(mdev, 1, true);
        }

        /* Update mdev->ldev->md.la_size_sect,
         * since we updated it on metadata. */
        mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

        drbd_md_put_buffer(mdev);
out:
        put_ldev(mdev);
}

/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:       DRBD device.
 * @bdev:       Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
        struct meta_data_on_disk *buffer;
        u32 magic, flags;
        int i, rv = NO_ERROR;

        if (!get_ldev_if_state(mdev, D_ATTACHING))
                return ERR_IO_MD_DISK;

        buffer = drbd_md_get_buffer(mdev);
        if (!buffer)
                goto out;

        if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
                /* NOTE: can't do normal error processing here as this is
                   called BEFORE disk is attached */
                dev_err(DEV, "Error while reading metadata.\n");
                rv = ERR_IO_MD_DISK;
                goto err;
        }

        magic = be32_to_cpu(buffer->magic);
        flags = be32_to_cpu(buffer->flags);
        if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
            (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
                /* btw: that's Activity Log clean, not "all" clean. */
                dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
                rv = ERR_MD_UNCLEAN;
                goto err;
        }
        if (magic != DRBD_MD_MAGIC_08) {
                if (magic == DRBD_MD_MAGIC_07)
                        dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
                else
                        dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
                dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
                        be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
                dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
                        be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
                dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
                        be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
                rv = ERR_MD_INVALID;
                goto err;
        }

        if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
                dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
                        be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
                rv = ERR_MD_INVALID;
                goto err;
        }

        bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
        bdev->md.flags = be32_to_cpu(buffer->flags);
        bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

        spin_lock_irq(&mdev->tconn->req_lock);
        if (mdev->state.conn < C_CONNECTED) {
                int peer;
                peer = be32_to_cpu(buffer->la_peer_max_bio_size);
                peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
                mdev->peer_max_bio_size = peer;
        }
        spin_unlock_irq(&mdev->tconn->req_lock);

err:
        drbd_md_put_buffer(mdev);
out:
        put_ldev(mdev);

        return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:       DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY and arms a
 * timer so that drbd_md_sync() runs soon (within five seconds, or one
 * second in DEBUG builds) even if nobody calls it explicitly.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
        if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
                mod_timer(&mdev->md_sync_timer, jiffies + HZ);
                mdev->last_md_mark_dirty.line = line;
                mdev->last_md_mark_dirty.func = func;
        }
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
        if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
                mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
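
/*
 * Illustrative sketch, not part of the driver: the usual meta data
 * update pattern.  Change something under a local disk reference;
 * drbd_md_set_flag() sets MD_DIRTY via drbd_md_mark_dirty() when the
 * flag actually changes, then either sync explicitly or rely on the
 * timer armed above.  Helper name and the choice of MDF_FULL_SYNC are
 * just for the example.
 */
static void __maybe_unused example_md_update(struct drbd_conf *mdev)
{
        if (get_ldev(mdev)) {
                drbd_md_set_flag(mdev, MDF_FULL_SYNC);
                drbd_md_sync(mdev);  /* write now, or let the timer do it */
                put_ldev(mdev);
        }
}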

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
        int i;

        for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
                mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (idx == UI_CURRENT) {
                if (mdev->state.role == R_PRIMARY)
                        val |= 1;
                else
                        val &= ~((u64)1);

                drbd_set_ed_uuid(mdev, val);
        }

        mdev->ldev->md.uuid[idx] = val;
        drbd_md_mark_dirty(mdev);
}

void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[idx]) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
        }
        _drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:       DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
        u64 val;
        unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

        if (bm_uuid)
                dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

        mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

        get_random_bytes(&val, sizeof(u64));
        _drbd_uuid_set(mdev, UI_CURRENT, val);
        drbd_print_uuids(mdev, "new current UUID");
        /* get it to stable storage _now_ */
        drbd_md_sync(mdev);
}

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
                return;

        if (val == 0) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
                mdev->ldev->md.uuid[UI_BITMAP] = 0;
        } else {
                unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
                if (bm_uuid)
                        dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

                mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
        }
        drbd_md_mark_dirty(mdev);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:       DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_md_set_flag(mdev, MDF_FULL_SYNC);
                drbd_md_sync(mdev);
                drbd_bm_set_all(mdev);

                rv = drbd_bm_write(mdev);

                if (!rv) {
                        drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
                        drbd_md_sync(mdev);
                }

                put_ldev(mdev);
        }

        return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:       DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        drbd_resume_al(mdev);
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_bm_clear_all(mdev);
                rv = drbd_bm_write(mdev);
                put_ldev(mdev);
        }

        return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
        struct bm_io_work *work = container_of(w, struct bm_io_work, w);
        struct drbd_conf *mdev = w->mdev;
        int rv = -EIO;

        D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

        if (get_ldev(mdev)) {
                drbd_bm_lock(mdev, work->why, work->flags);
                rv = work->io_fn(mdev);
                drbd_bm_unlock(mdev);
                put_ldev(mdev);
        }

        clear_bit_unlock(BITMAP_IO, &mdev->flags);
        wake_up(&mdev->misc_wait);

        if (work->done)
                work->done(mdev, rv);

        clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
        work->why = NULL;
        work->flags = 0;

        return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
        lc_destroy(mdev->resync);
        mdev->resync = NULL;
        lc_destroy(mdev->act_log);
        mdev->act_log = NULL;
        __no_warn(local,
                drbd_free_bc(mdev->ldev);
                mdev->ldev = NULL;);

        clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
        struct drbd_conf *mdev = w->mdev;

        D_ASSERT(mdev->state.disk == D_FAILED);
        /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
         * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
         * the protected members anymore, though, so once put_ldev reaches zero
         * again, it will be safe to free them. */
        drbd_force_state(mdev, NS(disk, D_DISKLESS));
        return 0;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
        D_ASSERT(mdev->state.disk == D_FAILED);
        if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
                drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:       DRBD device.
 * @io_fn:      IO callback to be called when bitmap IO is possible
 * @done:       callback to be called after the bitmap IO was performed
 * @why:        Descriptive text of the reason for doing the IO
 * @flags:      BM_LOCKED_* flags, passed through to drbd_bm_lock()
 *
 * While IO on the bitmap happens we freeze application IO, thus we ensure
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
                          int (*io_fn)(struct drbd_conf *),
                          void (*done)(struct drbd_conf *, int),
                          char *why, enum bm_flag flags)
{
        D_ASSERT(current == mdev->tconn->worker.task);

        D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
        D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
        D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
        if (mdev->bm_io_work.why)
                dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
                        why, mdev->bm_io_work.why);

        mdev->bm_io_work.io_fn = io_fn;
        mdev->bm_io_work.done = done;
        mdev->bm_io_work.why = why;
        mdev->bm_io_work.flags = flags;

        spin_lock_irq(&mdev->tconn->req_lock);
        set_bit(BITMAP_IO, &mdev->flags);
        if (atomic_read(&mdev->ap_bio_cnt) == 0) {
                if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
                        drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
        }
        spin_unlock_irq(&mdev->tconn->req_lock);
}
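
/*
 * Illustrative sketch, not part of the driver: queueing a full bitmap
 * write from worker context.  drbd_bmio_set_n_write() above is a real
 * io_fn; the done callback, the why string and the choice of
 * BM_LOCKED_SET_ALLOWED are assumptions for the example.
 */
static void __maybe_unused example_queue_bitmap_io(struct drbd_conf *mdev,
                                                   void (*my_done)(struct drbd_conf *, int))
{
        /* only valid on mdev->tconn->worker.task, see the asserts above */
        drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, my_done,
                             "example: set all bits", BM_LOCKED_SET_ALLOWED);
}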

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:       DRBD device.
 * @io_fn:      IO callback to be called when bitmap IO is possible
 * @why:        Descriptive text of the reason for doing the IO
 * @flags:      BM_LOCKED_* flags, passed through to drbd_bm_lock()
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
                   char *why, enum bm_flag flags)
{
        int rv;

        D_ASSERT(current != mdev->tconn->worker.task);

        if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
                drbd_suspend_io(mdev);

        drbd_bm_lock(mdev, why, flags);
        rv = io_fn(mdev);
        drbd_bm_unlock(mdev);

        if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
                drbd_resume_io(mdev);

        return rv;
}

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != flag) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags |= flag;
        }
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != 0) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags &= ~flag;
        }
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
        return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
        struct drbd_conf *mdev = w->mdev;

        dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
        dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
                 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
        drbd_md_sync(mdev);
        return 0;
}

const char *cmdname(enum drbd_packet cmd)
{
        /* THINK may need to become several global tables
         * when we want to support more than
         * one PRO_VERSION */
        static const char *cmdnames[] = {
                [P_DATA]                = "Data",
                [P_DATA_REPLY]          = "DataReply",
                [P_RS_DATA_REPLY]       = "RSDataReply",
                [P_BARRIER]             = "Barrier",
                [P_BITMAP]              = "ReportBitMap",
                [P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
                [P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
                [P_UNPLUG_REMOTE]       = "UnplugRemote",
                [P_DATA_REQUEST]        = "DataRequest",
                [P_RS_DATA_REQUEST]     = "RSDataRequest",
                [P_SYNC_PARAM]          = "SyncParam",
                [P_SYNC_PARAM89]        = "SyncParam89",
                [P_PROTOCOL]            = "ReportProtocol",
                [P_UUIDS]               = "ReportUUIDs",
                [P_SIZES]               = "ReportSizes",
                [P_STATE]               = "ReportState",
                [P_SYNC_UUID]           = "ReportSyncUUID",
                [P_AUTH_CHALLENGE]      = "AuthChallenge",
                [P_AUTH_RESPONSE]       = "AuthResponse",
                [P_PING]                = "Ping",
                [P_PING_ACK]            = "PingAck",
                [P_RECV_ACK]            = "RecvAck",
                [P_WRITE_ACK]           = "WriteAck",
                [P_RS_WRITE_ACK]        = "RSWriteAck",
                [P_DISCARD_WRITE]       = "DiscardWrite",
                [P_NEG_ACK]             = "NegAck",
                [P_NEG_DREPLY]          = "NegDReply",
                [P_NEG_RS_DREPLY]       = "NegRSDReply",
                [P_BARRIER_ACK]         = "BarrierAck",
                [P_STATE_CHG_REQ]       = "StateChgRequest",
                [P_STATE_CHG_REPLY]     = "StateChgReply",
                [P_OV_REQUEST]          = "OVRequest",
                [P_OV_REPLY]            = "OVReply",
                [P_OV_RESULT]           = "OVResult",
                [P_CSUM_RS_REQUEST]     = "CsumRSRequest",
                [P_RS_IS_IN_SYNC]       = "CsumRSIsInSync",
                [P_COMPRESSED_BITMAP]   = "CBitmap",
                [P_DELAY_PROBE]         = "DelayProbe",
                [P_OUT_OF_SYNC]         = "OutOfSync",
                [P_RETRY_WRITE]         = "RetryWrite",
                [P_RS_CANCEL]           = "RSCancel",
                [P_CONN_ST_CHG_REQ]     = "conn_st_chg_req",
                [P_CONN_ST_CHG_REPLY]   = "conn_st_chg_reply",
                [P_PROTOCOL_UPDATE]     = "protocol_update",

                /* enum drbd_packet, but not commands - obsoleted flags:
                 *      P_MAY_IGNORE
                 *      P_MAX_OPT_CMD
                 */
        };

        /* too big for the array: 0xfffX */
        if (cmd == P_INITIAL_META)
                return "InitialMeta";
        if (cmd == P_INITIAL_DATA)
                return "InitialData";
        if (cmd == P_CONNECTION_FEATURES)
                return "ConnectionFeatures";
        if (cmd >= ARRAY_SIZE(cmdnames))
                return "Unknown";
        return cmdnames[cmd];
}

/**
 * drbd_wait_misc - wait for a request to make progress
 * @mdev:       device associated with the request
 * @i:          the struct drbd_interval embedded in struct drbd_request or
 *              struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
        struct net_conf *nc;
        DEFINE_WAIT(wait);
        long timeout;

        rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return -ETIMEDOUT;
        }
        timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
        rcu_read_unlock();

        /* Indicate to wake up mdev->misc_wait on progress. */
        i->waiting = true;
        prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
        spin_unlock_irq(&mdev->tconn->req_lock);
        timeout = schedule_timeout(timeout);
        finish_wait(&mdev->misc_wait, &wait);
        spin_lock_irq(&mdev->tconn->req_lock);
        if (!timeout || mdev->state.conn < C_CONNECTED)
                return -ETIMEDOUT;
        if (signal_pending(current))
                return -ERESTARTSYS;
        return 0;
}
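
/*
 * Illustrative sketch, not part of the driver: the timeout arithmetic
 * used in drbd_wait_misc() above.  net_conf->timeout is configured in
 * tenths of a second, so e.g. timeout = 60 and ko_count = 2 yield
 * 6 s * 2 = 12 s worth of jiffies.  The helper name is hypothetical.
 */
static long __maybe_unused example_ko_timeout(unsigned int timeout_ds,
                                              unsigned int ko_count)
{
        return ko_count ? (long)timeout_ds * HZ / 10 * ko_count
                        : MAX_SCHEDULE_TIMEOUT;
}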

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
        unsigned long state;
        unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701  /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator. Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
        long refresh;

        if (!rsp->count--) {
                get_random_bytes(&refresh, sizeof(refresh));
                rsp->state += refresh;
                rsp->count = FAULT_RANDOM_REFRESH;
        }
        rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
        return swahw32(rsp->state);
}
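
/*
 * Illustrative sketch, not part of the driver: how _drbd_insert_fault()
 * below uses the generator - a roll in [1, 100] compared against the
 * fault_rate percentage.  Helper name and state are just for the example.
 */
static unsigned long __maybe_unused example_fault_roll(void)
{
        static struct fault_random_state rs = { 0, 0 };

        return (_drbd_fault_random(&rs) % 100) + 1;
}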

static char *
_drbd_fault_str(unsigned int type) {
        static char *_faults[] = {
                [DRBD_FAULT_MD_WR] = "Meta-data write",
                [DRBD_FAULT_MD_RD] = "Meta-data read",
                [DRBD_FAULT_RS_WR] = "Resync write",
                [DRBD_FAULT_RS_RD] = "Resync read",
                [DRBD_FAULT_DT_WR] = "Data write",
                [DRBD_FAULT_DT_RD] = "Data read",
                [DRBD_FAULT_DT_RA] = "Data read ahead",
                [DRBD_FAULT_BM_ALLOC] = "BM allocation",
                [DRBD_FAULT_AL_EE] = "EE allocation",
                [DRBD_FAULT_RECEIVE] = "receive data corruption",
        };

        return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
        static struct fault_random_state rrs = {0, 0};

        unsigned int ret = (
                (fault_devs == 0 ||
                        ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
                (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

        if (ret) {
                fault_count++;

                if (__ratelimit(&drbd_ratelimit_state))
                        dev_warn(DEV, "***Simulating %s failure\n",
                                 _drbd_fault_str(type));
        }

        return ret;
}
#endif

const char *drbd_buildtag(void)
{
        /* DRBD built from external sources has here a reference to the
           git hash of the source code. */

        static char buildtag[38] = "\0uilt-in";

        if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
                if (THIS_MODULE != NULL)
                        sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
                else
#endif
                        buildtag[0] = 'b';
        }

        return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);