/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
              "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
                 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details;       /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;  /* list of struct drbd_tconn */
DEFINE_MUTEX(drbd_cfg_mutex);

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;       /* peer requests */
struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
         member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
        .owner   = THIS_MODULE,
        .open    = drbd_open,
        .release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
        bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
        struct bio *bio;

        if (!drbd_md_io_bio_set)
                return bio_alloc(gfp_mask, 1);

        bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
        if (!bio)
                return NULL;
        bio->bi_destructor = bio_destructor_drbd;
        return bio;
}
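/*
 * Usage sketch (illustrative only, not part of the original file):
 * metadata I/O would allocate its single-page bio like
 *
 *      struct bio *bio = bio_alloc_drbd(GFP_NOIO);
 *      if (!bio)
 *              return -ENOMEM;
 *
 * Allocating from the dedicated drbd_md_io_bio_set guarantees forward
 * progress under memory pressure; the plain bio_alloc() fallback is only
 * taken when the dedicated set is not (yet) available.
 */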

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
        int io_allowed;

        atomic_inc(&mdev->local_cnt);
        io_allowed = (mdev->state.disk >= mins);
        if (!io_allowed) {
                if (atomic_dec_and_test(&mdev->local_cnt))
                        wake_up(&mdev->misc_wait);
        }
        return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
 * attached.
 */
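/*
 * Illustrative layout (not from the original source):
 *
 *      oldest_tle                                  newest_tle
 *          |                                           |
 *          v                                           v
 *      [epoch N] --next--> [epoch N+1] --next--> [epoch N+2] --> NULL
 *          |                   |                       |
 *       requests            requests                requests
 *      (circular)          (circular)              (circular)
 *
 * A barrier ack for epoch N releases the element at oldest_tle; new
 * writes are always queued on the epoch at newest_tle.
 */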
static int tl_init(struct drbd_tconn *tconn)
{
        struct drbd_tl_epoch *b;

        /* during device minor initialization, we may well use GFP_KERNEL */
        b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
        if (!b)
                return 0;
        INIT_LIST_HEAD(&b->requests);
        INIT_LIST_HEAD(&b->w.list);
        b->next = NULL;
        b->br_number = 4711;
        b->n_writes = 0;
        b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

        tconn->oldest_tle = b;
        tconn->newest_tle = b;
        INIT_LIST_HEAD(&tconn->out_of_sequence_requests);

        return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
        if (tconn->oldest_tle != tconn->newest_tle)
                conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
        if (!list_empty(&tconn->out_of_sequence_requests))
                conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
        kfree(tconn->oldest_tle);
        tconn->oldest_tle = NULL;
        kfree(tconn->unused_spare_tle);
        tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn:	DRBD connection.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
        struct drbd_tl_epoch *newest_before;

        INIT_LIST_HEAD(&new->requests);
        INIT_LIST_HEAD(&new->w.list);
        new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
        new->next = NULL;
        new->n_writes = 0;

        newest_before = tconn->newest_tle;
        /* never send a barrier number == 0, because that is special-cased
         * when using TCQ for our write ordering code */
        new->br_number = (newest_before->br_number+1) ?: 1;
        if (tconn->newest_tle != new) {
                tconn->newest_tle->next = new;
                tconn->newest_tle = new;
        }
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
                unsigned int set_size)
{
        struct drbd_conf *mdev;
        struct drbd_tl_epoch *b, *nob; /* next old barrier */
        struct list_head *le, *tle;
        struct drbd_request *r;

        spin_lock_irq(&tconn->req_lock);

        b = tconn->oldest_tle;

        /* first some paranoia code */
        if (b == NULL) {
                conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
                         barrier_nr);
                goto bail;
        }
        if (b->br_number != barrier_nr) {
                conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
                         barrier_nr, b->br_number);
                goto bail;
        }
        if (b->n_writes != set_size) {
                conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
                         barrier_nr, set_size, b->n_writes);
                goto bail;
        }

        /* Clean up list of requests processed during current epoch */
        list_for_each_safe(le, tle, &b->requests) {
                r = list_entry(le, struct drbd_request, tl_requests);
                _req_mod(r, BARRIER_ACKED);
        }
        /* There could be requests on the list waiting for completion
           of the write to the local disk. To avoid corruptions of
           slab's data structures we have to remove the lists head.

           Also there could have been a barrier ack out of sequence, overtaking
           the write acks - which would be a bug and violating write ordering.
           To not deadlock in case we lose connection while such requests are
           still pending, we need some way to find them for the
           _req_mod(CONNECTION_LOST_WHILE_PENDING).

           These have been list_move'd to the out_of_sequence_requests list in
           _req_mod(, BARRIER_ACKED) above.
           */
        list_del_init(&b->requests);
        mdev = b->w.mdev;

        nob = b->next;
        if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
                _tl_add_barrier(tconn, b);
                if (nob)
                        tconn->oldest_tle = nob;
                /* if nob == NULL b was the only barrier, and becomes the new
                   barrier. Therefore tconn->oldest_tle points already to b */
        } else {
                D_ASSERT(nob != NULL);
                tconn->oldest_tle = nob;
                kfree(b);
        }

        spin_unlock_irq(&tconn->req_lock);
        dec_ap_pending(mdev);

        return;

bail:
        spin_unlock_irq(&tconn->req_lock);
        conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
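/*
 * Usage sketch (illustrative, not part of the original file): the
 * barrier-ack handler calls this with both numbers taken from the
 * P_BARRIER_ACK packet, roughly
 *
 *      tl_release(tconn, barrier_nr, set_size);
 *
 * Any mismatch with the oldest epoch trips the paranoia checks above
 * and forces the connection into C_PROTOCOL_ERROR.
 */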

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
        struct drbd_tl_epoch *b, *tmp, **pn;
        struct list_head *le, *tle, carry_reads;
        struct drbd_request *req;
        int rv, n_writes, n_reads;

        b = tconn->oldest_tle;
        pn = &tconn->oldest_tle;
        while (b) {
                n_writes = 0;
                n_reads = 0;
                INIT_LIST_HEAD(&carry_reads);
                list_for_each_safe(le, tle, &b->requests) {
                        req = list_entry(le, struct drbd_request, tl_requests);
                        rv = _req_mod(req, what);

                        n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
                        n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
                }
                tmp = b->next;

                if (n_writes) {
                        if (what == RESEND) {
                                b->n_writes = n_writes;
                                if (b->w.cb == NULL) {
                                        b->w.cb = w_send_barrier;
                                        inc_ap_pending(b->w.mdev);
                                        set_bit(CREATE_BARRIER, &b->w.mdev->flags);
                                }

                                drbd_queue_work(&tconn->data.work, &b->w);
                        }
                        pn = &b->next;
                } else {
                        if (n_reads)
                                list_add(&carry_reads, &b->requests);
                        /* there could still be requests on that ring list,
                         * in case local io is still pending */
                        list_del(&b->requests);

                        /* dec_ap_pending corresponding to queue_barrier.
                         * the newest barrier may not have been queued yet,
                         * in which case w.cb is still NULL. */
                        if (b->w.cb != NULL)
                                dec_ap_pending(b->w.mdev);

                        if (b == tconn->newest_tle) {
                                /* recycle, but reinit! */
                                if (tmp != NULL)
                                        conn_err(tconn, "ASSERT FAILED tmp == NULL");
                                INIT_LIST_HEAD(&b->requests);
                                list_splice(&carry_reads, &b->requests);
                                INIT_LIST_HEAD(&b->w.list);
                                b->w.cb = NULL;
                                b->br_number = net_random();
                                b->n_writes = 0;

                                *pn = b;
                                break;
                        }
                        *pn = tmp;
                        kfree(b);
                }
                b = tmp;
                list_splice(&carry_reads, &b->requests);
        }
}
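/*
 * Illustrative summary (not from the original source): _req_mod()
 * reports via MR_WRITE/MR_READ whether a request is still a pending
 * write or read after @what was applied. Epochs that keep pending
 * writes stay linked (and are re-queued as barriers on RESEND); drained
 * epochs are freed, pending reads are carried over into the next epoch,
 * and the newest epoch is recycled with a fresh barrier number so the
 * log never becomes empty.
 */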

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        struct list_head *le, *tle;
        struct drbd_request *r;
        int vnr;

        spin_lock_irq(&tconn->req_lock);

        _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

        /* we expect this list to be empty. */
        if (!list_empty(&tconn->out_of_sequence_requests))
                conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

        /* but just in case, clean it up anyways! */
        list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
                r = list_entry(le, struct drbd_request, tl_requests);
                /* It would be nice to complete outside of spinlock.
                 * But this is easier for now. */
                _req_mod(r, CONNECTION_LOST_WHILE_PENDING);
        }

        /* ensure bit indicating barrier is required is clear */
        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                clear_bit(CREATE_BARRIER, &mdev->flags);

        spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
        spin_lock_irq(&tconn->req_lock);
        _tl_restart(tconn, what);
        spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
        struct drbd_thread *thi = (struct drbd_thread *) arg;
        struct drbd_tconn *tconn = thi->tconn;
        unsigned long flags;
        int retval;

        snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
                 thi->name[0], thi->tconn->name);

restart:
        retval = thi->function(thi);

        spin_lock_irqsave(&thi->t_lock, flags);

        /* if the receiver has been "EXITING", the last thing it did
         * was set the conn state to "StandAlone",
         * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
         * and receiver thread will be "started".
         * drbd_thread_start needs to set "RESTARTING" in that case.
         * t_state check and assignment needs to be within the same spinlock,
         * so either thread_start sees EXITING, and can remap to RESTARTING,
         * or thread_start sees NONE, and can proceed as normal.
         */

        if (thi->t_state == RESTARTING) {
                conn_info(tconn, "Restarting %s thread\n", thi->name);
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                goto restart;
        }

        thi->task = NULL;
        thi->t_state = NONE;
        smp_mb();
        complete(&thi->stop);
        spin_unlock_irqrestore(&thi->t_lock, flags);

        conn_info(tconn, "Terminating %s\n", current->comm);

        /* Release mod reference taken when thread was started */
        module_put(THIS_MODULE);
        return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
                             int (*func) (struct drbd_thread *), char *name)
{
        spin_lock_init(&thi->t_lock);
        thi->task = NULL;
        thi->t_state = NONE;
        thi->function = func;
        thi->tconn = tconn;
        strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
        struct drbd_tconn *tconn = thi->tconn;
        struct task_struct *nt;
        unsigned long flags;

        /* is used from state engine doing drbd_thread_stop_nowait,
         * while holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        switch (thi->t_state) {
        case NONE:
                conn_info(tconn, "Starting %s thread (from %s [%d])\n",
                          thi->name, current->comm, current->pid);

                /* Get ref on module for thread - this is released when thread exits */
                if (!try_module_get(THIS_MODULE)) {
                        conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return false;
                }

                init_completion(&thi->stop);
                thi->reset_cpu_mask = 1;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

                nt = kthread_create(drbd_thread_setup, (void *) thi,
                                    "drbd_%c_%s", thi->name[0], thi->tconn->name);

                if (IS_ERR(nt)) {
                        conn_err(tconn, "Couldn't start thread\n");

                        module_put(THIS_MODULE);
                        return false;
                }
                spin_lock_irqsave(&thi->t_lock, flags);
                thi->task = nt;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                wake_up_process(nt);
                break;
        case EXITING:
                thi->t_state = RESTARTING;
                conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
                          thi->name, current->comm, current->pid);
                /* fall through */
        case RUNNING:
        case RESTARTING:
        default:
                spin_unlock_irqrestore(&thi->t_lock, flags);
                break;
        }

        return true;
}

void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
        unsigned long flags;

        enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

        /* may be called from state engine, holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        if (thi->t_state == NONE) {
                spin_unlock_irqrestore(&thi->t_lock, flags);
                if (restart)
                        drbd_thread_start(thi);
                return;
        }

        if (thi->t_state != ns) {
                if (thi->task == NULL) {
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return;
                }

                thi->t_state = ns;
                smp_mb();
                init_completion(&thi->stop);
                if (thi->task != current)
                        force_sig(DRBD_SIGKILL, thi->task);
        }

        spin_unlock_irqrestore(&thi->t_lock, flags);

        if (wait)
                wait_for_completion(&thi->stop);
}
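/*
 * Illustrative thread state diagram (not from the original source):
 *
 *      NONE --start--> RUNNING --stop--> EXITING --exited--> NONE
 *                         ^                 |
 *                         |     start while exiting
 *                         +----- RESTARTING <----+
 *
 * All transitions happen under thi->t_lock, which is how
 * drbd_thread_start() and _drbd_thread_stop() avoid racing with the
 * thread's own exit path in drbd_thread_setup().
 */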

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
        struct drbd_thread *thi =
                task == tconn->receiver.task ? &tconn->receiver :
                task == tconn->asender.task  ? &tconn->asender :
                task == tconn->worker.task   ? &tconn->worker : NULL;

        return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
        struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
        return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
        int vnr = 0;
        struct drbd_conf *mdev;

        mdev = idr_get_next(&tconn->volumes, &vnr);
        if (!mdev)
                return -1;
        return mdev_to_minor(mdev);
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
        int ord, cpu;

        /* user override. */
        if (cpumask_weight(tconn->cpu_mask))
                return;

        ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
        for_each_online_cpu(cpu) {
                if (ord-- == 0) {
                        cpumask_set_cpu(cpu, tconn->cpu_mask);
                        return;
                }
        }
        /* should not be reached */
        cpumask_setall(tconn->cpu_mask);
}
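/*
 * Worked example (illustrative, not from the original source): with
 * four online CPUs, a connection whose lowest minor is 5 gets
 * ord = 5 % 4 = 1, so its receiver, worker and asender are all pinned
 * to the second online CPU. Distinct connections thereby spread across
 * CPUs while the threads of one connection share a cache.
 */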

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
        struct task_struct *p = current;

        if (!thi->reset_cpu_mask)
                return;
        thi->reset_cpu_mask = 0;
        set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
        if (tconn->agreed_pro_version >= 100) {
                BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
                return sizeof(struct p_header100);
        } else {
                BUILD_BUG_ON(sizeof(struct p_header80) !=
                             sizeof(struct p_header95));
                BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
                return sizeof(struct p_header80);
        }
}
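/*
 * Wire formats at a glance (illustrative; field widths inferred from
 * the prepare_header*() helpers below, the pad width is an assumption):
 *
 *      p_header80:  u32 magic, u16 command, u16 length
 *      p_header95:  u16 magic, u16 command, u32 length
 *      p_header100: u32 magic, u16 volume, u16 command, u32 length, u32 pad
 *
 * All multi-byte fields are sent in big-endian (network) byte order.
 */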

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be32(DRBD_MAGIC);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be16(size);
        return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be32(size);
        return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
                                      int size, int vnr)
{
        h->magic = cpu_to_be32(DRBD_MAGIC_100);
        h->volume = cpu_to_be16(vnr);
        h->command = cpu_to_be16(cmd);
        h->length = cpu_to_be32(size);
        h->pad = 0;
        return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
                                   void *buffer, enum drbd_packet cmd, int size)
{
        if (tconn->agreed_pro_version >= 100)
                return prepare_header100(buffer, cmd, size, vnr);
        else if (tconn->agreed_pro_version >= 95 &&
                 size > DRBD_MAX_SIZE_H80_PACKET)
                return prepare_header95(buffer, cmd, size);
        else
                return prepare_header80(buffer, cmd, size);
}
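/*
 * Illustrative decision table (not from the original source):
 *
 *      agreed_pro_version >= 100                 -> p_header100
 *      agreed_pro_version >= 95 && large payload -> p_header95
 *      otherwise                                 -> p_header80
 *
 * The 95-style header exists only because the 16-bit length field of
 * p_header80 cannot describe payloads above DRBD_MAX_SIZE_H80_PACKET.
 */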

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
        mutex_lock(&sock->mutex);
        if (!sock->socket) {
                mutex_unlock(&sock->mutex);
                return NULL;
        }
        return sock->sbuf + drbd_header_size(tconn);
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
        return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
                          struct drbd_socket *sock, enum drbd_packet cmd,
                          unsigned int header_size, void *data,
                          unsigned int size)
{
        int msg_flags;
        int err;

        /*
         * Called with @data == NULL and the size of the data blocks in @size
         * for commands that send data blocks. For those commands, omit the
         * MSG_MORE flag: this will increase the likelihood that data blocks
         * which are page aligned on the sender will end up page aligned on the
         * receiver.
         */
        msg_flags = data ? MSG_MORE : 0;

        header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
                                      header_size + size);
        err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
                            msg_flags);
        if (data && !err)
                err = drbd_send_all(tconn, sock->socket, data, size, 0);
        return err;
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
                      enum drbd_packet cmd, unsigned int header_size,
                      void *data, unsigned int size)
{
        int err;

        err = __send_command(tconn, 0, sock, cmd, header_size, data, size);
        mutex_unlock(&sock->mutex);
        return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
                      enum drbd_packet cmd, unsigned int header_size,
                      void *data, unsigned int size)
{
        int err;

        err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
                             data, size);
        mutex_unlock(&sock->mutex);
        return err;
}
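/*
 * Usage sketch (illustrative, not part of the original file; p_foo and
 * P_FOO are placeholders): every sender follows the same locked
 * prepare/send pairing -
 *
 *      struct p_foo *p = drbd_prepare_command(mdev, sock); // takes sock->mutex
 *      if (!p)
 *              return -EIO;    // socket gone, mutex already dropped
 *      p->field = cpu_to_be32(value);
 *      return drbd_send_command(mdev, sock, P_FOO, sizeof(*p), NULL, 0);
 *                              // sends and drops sock->mutex
 *
 * The mutex serializes whole packets on one socket, so prepare and send
 * must always be paired.
 */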

int drbd_send_ping(struct drbd_tconn *tconn)
{
        struct drbd_socket *sock;

        sock = &tconn->meta;
        if (!conn_prepare_command(tconn, sock))
                return -EIO;
        return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
        struct drbd_socket *sock;

        sock = &tconn->meta;
        if (!conn_prepare_command(tconn, sock))
                return -EIO;
        return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
        struct drbd_socket *sock;
        struct p_rs_param_95 *p;
        int size;
        const int apv = mdev->tconn->agreed_pro_version;
        enum drbd_packet cmd;

        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
        if (!p)
                return -EIO;

        size = apv <= 87 ? sizeof(struct p_rs_param)
                : apv == 88 ? sizeof(struct p_rs_param)
                        + strlen(mdev->tconn->net_conf->verify_alg) + 1
                : apv <= 94 ? sizeof(struct p_rs_param_89)
                : /* apv >= 95 */ sizeof(struct p_rs_param_95);

        cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

        /* initialize verify_alg and csums_alg */
        memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

        if (get_ldev(mdev)) {
                p->rate = cpu_to_be32(mdev->ldev->dc.resync_rate);
                p->c_plan_ahead = cpu_to_be32(mdev->ldev->dc.c_plan_ahead);
                p->c_delay_target = cpu_to_be32(mdev->ldev->dc.c_delay_target);
                p->c_fill_target = cpu_to_be32(mdev->ldev->dc.c_fill_target);
                p->c_max_rate = cpu_to_be32(mdev->ldev->dc.c_max_rate);
                put_ldev(mdev);
        } else {
                p->rate = cpu_to_be32(DRBD_RATE_DEF);
                p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
                p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
                p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
                p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
        }

        if (apv >= 88)
                strcpy(p->verify_alg, mdev->tconn->net_conf->verify_alg);
        if (apv >= 89)
                strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);

        return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
        struct drbd_socket *sock;
        struct p_protocol *p;
        int size, cf;

        if (tconn->net_conf->dry_run && tconn->agreed_pro_version < 92) {
                conn_err(tconn, "--dry-run is not supported by peer");
                return -EOPNOTSUPP;
        }

        sock = &tconn->data;
        p = conn_prepare_command(tconn, sock);
        if (!p)
                return -EIO;

        size = sizeof(*p);
        if (tconn->agreed_pro_version >= 87)
                size += strlen(tconn->net_conf->integrity_alg) + 1;

        p->protocol      = cpu_to_be32(tconn->net_conf->wire_protocol);
        p->after_sb_0p   = cpu_to_be32(tconn->net_conf->after_sb_0p);
        p->after_sb_1p   = cpu_to_be32(tconn->net_conf->after_sb_1p);
        p->after_sb_2p   = cpu_to_be32(tconn->net_conf->after_sb_2p);
        p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);
        cf = 0;
        if (tconn->net_conf->want_lose)
                cf |= CF_WANT_LOSE;
        if (tconn->net_conf->dry_run)
                cf |= CF_DRY_RUN;
        p->conn_flags    = cpu_to_be32(cf);

        if (tconn->agreed_pro_version >= 87)
                strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);
        return conn_send_command(tconn, sock, P_PROTOCOL, size, NULL, 0);
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
        struct drbd_socket *sock;
        struct p_uuids *p;
        int i;

        if (!get_ldev_if_state(mdev, D_NEGOTIATING))
                return 0;

        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
        if (!p) {
                put_ldev(mdev);
                return -EIO;
        }
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

        mdev->comm_bm_set = drbd_bm_total_weight(mdev);
        p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
        uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
        uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
        uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
        p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

        put_ldev(mdev);
        return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
        return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
        return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
        if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
                u64 *uuid = mdev->ldev->md.uuid;
                dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
                         text,
                         (unsigned long long)uuid[UI_CURRENT],
                         (unsigned long long)uuid[UI_BITMAP],
                         (unsigned long long)uuid[UI_HISTORY_START],
                         (unsigned long long)uuid[UI_HISTORY_END]);
                put_ldev(mdev);
        } else {
                dev_info(DEV, "%s effective data uuid: %016llX\n",
                         text,
                         (unsigned long long)mdev->ed_uuid);
        }
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
        struct drbd_socket *sock;
        struct p_rs_uuid *p;
        u64 uuid;

        D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

        uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
        drbd_uuid_set(mdev, UI_BITMAP, uuid);
        drbd_print_uuids(mdev, "updated sync UUID");
        drbd_md_sync(mdev);

        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
        if (p) {
                p->uuid = cpu_to_be64(uuid);
                drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
        }
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
        struct drbd_socket *sock;
        struct p_sizes *p;
        sector_t d_size, u_size;
        int q_order_type, max_bio_size;

        if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
                D_ASSERT(mdev->ldev->backing_bdev);
                d_size = drbd_get_max_capacity(mdev->ldev);
                u_size = mdev->ldev->dc.disk_size;
                q_order_type = drbd_queue_order_type(mdev);
                max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
                max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
                put_ldev(mdev);
        } else {
                d_size = 0;
                u_size = 0;
                q_order_type = QUEUE_ORDERED_NONE;
                max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
        }

        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
        if (!p)
                return -EIO;
        p->d_size = cpu_to_be64(d_size);
        p->u_size = cpu_to_be64(u_size);
        p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
        p->max_bio_size = cpu_to_be32(max_bio_size);
        p->queue_order_type = cpu_to_be16(q_order_type);
        p->dds_flags = cpu_to_be16(flags);
        return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
        struct drbd_socket *sock;
        struct p_state *p;

        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
        if (!p)
                return -EIO;
        p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
        return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
        struct drbd_socket *sock;
        struct p_req_state *p;

        sock = &mdev->tconn->data;
        p = drbd_prepare_command(mdev, sock);
        if (!p)
                return -EIO;
        p->mask = cpu_to_be32(mask.i);
        p->val = cpu_to_be32(val.i);
        return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
        enum drbd_packet cmd;
        struct drbd_socket *sock;
        struct p_req_state *p;

        cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
        sock = &tconn->data;
        p = conn_prepare_command(tconn, sock);
        if (!p)
                return -EIO;
        p->mask = cpu_to_be32(mask.i);
        p->val = cpu_to_be32(val.i);
        return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
        struct drbd_socket *sock;
        struct p_req_state_reply *p;

        sock = &mdev->tconn->meta;
        p = drbd_prepare_command(mdev, sock);
        if (p) {
                p->retcode = cpu_to_be32(retcode);
                drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
        }
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
        struct drbd_socket *sock;
        struct p_req_state_reply *p;
        enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

        sock = &tconn->meta;
        p = conn_prepare_command(tconn, sock);
        if (p) {
                p->retcode = cpu_to_be32(retcode);
                conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
        }
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
        BUG_ON(code & ~0xf);
        p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
        p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
        BUG_ON(n & ~0x7);
        p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
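/*
 * Layout of the encoding byte, inferred from the masks in the three
 * helpers above (illustrative, not from the original source):
 *
 *      bit  7    : whether the first run length describes set bits
 *      bits 6..4 : number of pad bits at the end of the code string
 *      bits 3..0 : bitmap code (e.g. RLE_VLI_Bits)
 */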

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
                         struct p_compressed_bm *p,
                         unsigned int size,
                         struct bm_xfer_ctx *c)
{
        struct bitstream bs;
        unsigned long plain_bits;
        unsigned long tmp;
        unsigned long rl;
        unsigned len;
        unsigned toggle;
        int bits;

        /* may we use this feature? */
        if ((mdev->tconn->net_conf->use_rle == 0) ||
            (mdev->tconn->agreed_pro_version < 90))
                return 0;

        if (c->bit_offset >= c->bm_bits)
                return 0; /* nothing to do. */

        /* use at most this many bytes */
        bitstream_init(&bs, p->code, size, 0);
        memset(p->code, 0, size);
        /* plain bits covered in this code string */
        plain_bits = 0;

        /* p->encoding & 0x80 stores whether the first run length is set.
         * bit offset is implicit.
         * start with toggle == 2 to be able to tell the first iteration */
        toggle = 2;

        /* see how many plain bits we can stuff into one packet
         * using RLE and VLI. */
        do {
                tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
                                    : _drbd_bm_find_next(mdev, c->bit_offset);
                if (tmp == -1UL)
                        tmp = c->bm_bits;
                rl = tmp - c->bit_offset;

                if (toggle == 2) { /* first iteration */
                        if (rl == 0) {
                                /* the first checked bit was set,
                                 * store start value, */
                                dcbp_set_start(p, 1);
                                /* but skip encoding of zero run length */
                                toggle = !toggle;
                                continue;
                        }
                        dcbp_set_start(p, 0);
                }

                /* paranoia: catch zero runlength.
                 * can only happen if bitmap is modified while we scan it. */
                if (rl == 0) {
                        dev_err(DEV, "unexpected zero runlength while encoding bitmap "
                                "t:%u bo:%lu\n", toggle, c->bit_offset);
                        return -1;
                }

                bits = vli_encode_bits(&bs, rl);
                if (bits == -ENOBUFS) /* buffer full */
                        break;
                if (bits <= 0) {
                        dev_err(DEV, "error while encoding bitmap: %d\n", bits);
                        return 0;
                }

                toggle = !toggle;
                plain_bits += rl;
                c->bit_offset = tmp;
        } while (c->bit_offset < c->bm_bits);

        len = bs.cur.b - p->code + !!bs.cur.bit;

        if (plain_bits < (len << 3)) {
                /* incompressible with this method.
                 * we need to rewind both word and bit position. */
                c->bit_offset -= plain_bits;
                bm_xfer_ctx_bit_to_word_offset(c);
                c->bit_offset = c->word_offset * BITS_PER_LONG;
                return 0;
        }

        /* RLE + VLI was able to compress it just fine.
         * update c->word_offset. */
        bm_xfer_ctx_bit_to_word_offset(c);

        /* store pad_bits */
        dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

        return len;
}
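/*
 * Worked example (illustrative, not from the original source): the
 * bitmap fragment 0000 0111 1111 1100 becomes the run lengths 5, 9, 2
 * with the start flag cleared (the first run describes zeros). Each
 * run length is then VLI-encoded into the bitstream, so long
 * homogeneous stretches compress to a few bytes, while a noisy bitmap
 * fails the plain_bits < (len << 3) test above and is sent
 * uncompressed by the caller instead.
 */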

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}
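
/*
 * Bookkeeping note: index 0 of c->packets[]/c->bytes[] counts
 * P_COMPRESSED_BITMAP packets, index 1 the plain P_BITMAP fallback;
 * INFO_bm_xfer_stats() reports both once the whole bitmap went out.
 */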

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
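
/*
 * drbd_send_b_ack() above is the simplest instance of the pattern most
 * senders in this file follow: drbd_prepare_command() takes sock->mutex
 * and returns a pointer into the preallocated send buffer (NULL once the
 * connection is gone), the caller fills in the payload fields, and
 * drbd_send_command() transmits and releases the mutex again.  A sketch of
 * a hypothetical new sender (P_SOME_PACKET and its field are made up):
 *
 *	p = drbd_prepare_command(mdev, sock);
 *	if (!p)
 *		return -EIO;
 *	p->some_field = cpu_to_be32(value);
 *	return drbd_send_command(mdev, sock, P_SOME_PACKET, sizeof(*p), NULL, 0);
 *
 * Senders that stream extra payload behind the header use __send_command()
 * and unlock sock->mutex themselves, see drbd_send_dblock() below.
 */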

/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in bytes, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return -EIO;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}
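
/*
 * Byte-order convention for the ack helpers: _drbd_send_ack() stores
 * @sector, @blksize and @block_id exactly as given, so each caller decides
 * about conversion.  Values echoed from a received packet
 * (drbd_send_ack_dp(), drbd_send_ack_rp()) are already big endian and pass
 * through untouched; locally generated values are converted with
 * cpu_to_be64()/cpu_to_be32() first, as in drbd_send_ack() and
 * drbd_send_ack_ex() below.
 */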

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	data_size -= (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p),
				 digest, digest_size);
}

int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think the connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   tconn->meta.socket == sock
		|| !tconn->asender.task
		|| get_t_state(&tconn->asender) != RUNNING
		|| tconn->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--tconn->ko_count;
	if (!drop_it) {
		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, tconn->ko_count);
		request_ping(tconn);
	}

	return drop_it; /* && (mdev->state == R_PRIMARY) */;
}

static void drbd_update_congested(struct drbd_tconn *tconn)
{
	struct sock *sk = tconn->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &tconn->flags);
}
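
/*
 * Example of the 4/5 threshold above: with a 128 KiB socket send buffer
 * (sk_sndbuf), NET_CONGESTED gets set once more than ~102 KiB of data sits
 * queued but unsent.  drbd_congested() below reports that bit back to the
 * writeback code; drbd_send() and _drbd_send_page() clear it again when
 * they are done.
 */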

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = mdev->tconn->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		mdev->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = mdev->tconn->data.socket;
	mm_segment_t oldfs = get_fs();
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(mdev->tconn);
	set_fs(KERNEL_DS);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(mdev->tconn, socket))
					break;
				continue;
			}
			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
			     __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &mdev->tconn->flags);

	if (len == 0) {
		err = 0;
		mdev->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	__bio_for_each_segment(bvec, bio, i, 0) {
		int err;

		err = _drbd_no_send_page(mdev, bvec->bv_page,
					 bvec->bv_offset, bvec->bv_len,
					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	__bio_for_each_segment(bvec, bio, i, 0) {
		int err;

		err = _drbd_send_page(mdev, bvec->bv_page,
				      bvec->bv_offset, bvec->bv_len,
				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}
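
/*
 * The two bio senders above differ only in the transport primitive:
 * _drbd_send_bio() copies each segment through _drbd_no_send_page()
 * (kmap + sendmsg), while _drbd_send_zc_bio() goes zero-copy through
 * _drbd_send_page()/sendpage (with an automatic fallback to the copying
 * path for slab pages).  drbd_send_dblock() below picks the copying
 * variant whenever protocol A or a data-integrity digest is in use,
 * because then the page content must not change after it was handed to
 * the network stack.
 */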

static int _drbd_send_zc_ee(struct drbd_conf *mdev,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(mdev, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}

static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
	if (mdev->tconn->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	else
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
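
/*
 * For example, on protocol version >= 95 a bio submitted with
 * REQ_FLUSH | REQ_FUA is announced as DP_FLUSH | DP_FUA, so the peer can
 * reproduce the ordering/durability semantics of the original request;
 * older peers only understand the sync hint, hence the reduced mapping.
 */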

/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	unsigned int dp_flags = 0;
	int dgs;
	int err;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
	if (mdev->state.conn >= C_SYNC_SOURCE &&
	    mdev->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	p->dp_flags = cpu_to_be32(dp_flags);
	if (dgs)
		drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we have certainly detected corruption elsewhere.
		 */
		if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs)
			err = _drbd_send_bio(mdev, req->master_bio);
		else
			err = _drbd_send_zc_bio(mdev, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (dgs > 0 && dgs <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, dgs)) {
				dev_warn(DEV,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (dgs > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
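
/*
 * Note on p->block_id in drbd_send_dblock(): the request pointer itself is
 * sent as the block_id, so the ack the peer returns for this write can be
 * matched straight back to the originating drbd_request without an extra
 * lookup structure.
 */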

/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int dgs;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	if (dgs)
		drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(mdev, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov;
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov.iov_base = buf;
	iov.iov_len  = size;

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	if (sock == tconn->data.socket) {
		tconn->ko_count = tconn->net_conf->ko_count;
		drbd_update_congested(tconn);
	}
	do {
		/* STRANGE
		 * tcp_sendmsg does _not_ use its size parameter at all ?
		 *
		 * -EAGAIN on timeout, -EINTR on signal.
		 */
/* THINK
 * do we need to block DRBD_SIG if sock == &meta.socket ??
 * otherwise wake_asender() might interrupt some send_*Ack !
 */
		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(tconn, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
		iov.iov_base += rv;
		iov.iov_len  -= rv;
	} while (sent < size);

	if (sock == tconn->data.socket)
		clear_bit(NET_CONGESTED, &tconn->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			conn_err(tconn, "%s_sendmsg returned %d\n",
				 sock == tconn->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}

/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(tconn, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}

static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_conf *mdev = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	/* to have a stable mdev->state.role
	 * and no race with updating open_cnt */

	if (mdev->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		mdev->open_cnt++;
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}

static int drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_conf *mdev = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	mdev->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
	return 0;
}

static void drbd_set_defaults(struct drbd_conf *mdev)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	mdev->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}

void drbd_init_set_defaults(struct drbd_conf *mdev)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(mdev);

	atomic_set(&mdev->ap_bio_cnt, 0);
	atomic_set(&mdev->ap_pending_cnt, 0);
	atomic_set(&mdev->rs_pending_cnt, 0);
	atomic_set(&mdev->unacked_cnt, 0);
	atomic_set(&mdev->local_cnt, 0);
	atomic_set(&mdev->pp_in_use_by_net, 0);
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	atomic_set(&mdev->ap_in_flight, 0);

	mutex_init(&mdev->md_io_mutex);
	mutex_init(&mdev->own_state_mutex);
	mdev->state_mutex = &mdev->own_state_mutex;

	spin_lock_init(&mdev->al_lock);
	spin_lock_init(&mdev->peer_seq_lock);
	spin_lock_init(&mdev->epoch_lock);

	INIT_LIST_HEAD(&mdev->active_ee);
	INIT_LIST_HEAD(&mdev->sync_ee);
	INIT_LIST_HEAD(&mdev->done_ee);
	INIT_LIST_HEAD(&mdev->read_ee);
	INIT_LIST_HEAD(&mdev->net_ee);
	INIT_LIST_HEAD(&mdev->resync_reads);
	INIT_LIST_HEAD(&mdev->resync_work.list);
	INIT_LIST_HEAD(&mdev->unplug_work.list);
	INIT_LIST_HEAD(&mdev->go_diskless.list);
	INIT_LIST_HEAD(&mdev->md_sync_work.list);
	INIT_LIST_HEAD(&mdev->start_resync_work.list);
	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

	mdev->resync_work.cb  = w_resync_timer;
	mdev->unplug_work.cb  = w_send_write_hint;
	mdev->go_diskless.cb  = w_go_diskless;
	mdev->md_sync_work.cb = w_md_sync;
	mdev->bm_io_work.w.cb = w_bitmap_io;
	mdev->start_resync_work.cb = w_start_resync;

	mdev->resync_work.mdev  = mdev;
	mdev->unplug_work.mdev  = mdev;
	mdev->go_diskless.mdev  = mdev;
	mdev->md_sync_work.mdev = mdev;
	mdev->bm_io_work.w.mdev = mdev;
	mdev->start_resync_work.mdev = mdev;

	init_timer(&mdev->resync_timer);
	init_timer(&mdev->md_sync_timer);
	init_timer(&mdev->start_resync_timer);
	init_timer(&mdev->request_timer);
	mdev->resync_timer.function = resync_timer_fn;
	mdev->resync_timer.data = (unsigned long) mdev;
	mdev->md_sync_timer.function = md_sync_timer_fn;
	mdev->md_sync_timer.data = (unsigned long) mdev;
	mdev->start_resync_timer.function = start_resync_timer_fn;
	mdev->start_resync_timer.data = (unsigned long) mdev;
	mdev->request_timer.function = request_timer_fn;
	mdev->request_timer.data = (unsigned long) mdev;

	init_waitqueue_head(&mdev->misc_wait);
	init_waitqueue_head(&mdev->state_wait);
	init_waitqueue_head(&mdev->ee_wait);
	init_waitqueue_head(&mdev->al_wait);
	init_waitqueue_head(&mdev->seq_wait);

	/* mdev->tconn->agreed_pro_version gets initialized in drbd_connect() */
	mdev->write_ordering = WO_bdev_flush;
	mdev->resync_wenr = LC_FREE;
	mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}

void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
	int i;
	if (mdev->tconn->receiver.t_state != NONE)
		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				mdev->tconn->receiver.t_state);

	/* no need to lock it, I'm the only thread alive */
	if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
		dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
	mdev->al_writ_cnt  =
	mdev->bm_writ_cnt  =
	mdev->read_cnt     =
	mdev->recv_cnt     =
	mdev->send_cnt     =
	mdev->writ_cnt     =
	mdev->p_size       =
	mdev->rs_start     =
	mdev->rs_total     =
	mdev->rs_failed    = 0;
	mdev->rs_last_events = 0;
	mdev->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		mdev->rs_mark_left[i] = 0;
		mdev->rs_mark_time[i] = 0;
	}
	D_ASSERT(mdev->tconn->net_conf == NULL);

	drbd_set_my_capacity(mdev, 0);
	if (mdev->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(mdev, 0, 1);
		drbd_bm_cleanup(mdev);
	}

	drbd_free_resources(mdev);
	clear_bit(AL_SUSPENDED, &mdev->flags);

	/*
	 * currently we drbd_init_ee only on module load, so
	 * we may do drbd_release_ee only on module unload!
	 */
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));
	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->net_ee));
	D_ASSERT(list_empty(&mdev->resync_reads));
	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
	D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));

	drbd_set_defaults(mdev);
}


static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
	if (drbd_md_io_page_pool)
		mempool_destroy(drbd_md_io_page_pool);
	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;

	return;
}

static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}
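
/*
 * Sizing note for the pools above: number scales with minor_count, one
 * DRBD_MAX_BIO_SIZE worth of pages per configured minor.  Assuming 4 KiB
 * pages and a 1 MiB maximum bio size (both values are build/architecture
 * dependent, so treat the numbers as illustrative), that preallocates 256
 * pages per minor for the page pool and the same count of objects for the
 * request and ee mempools.
 */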

static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
			   void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};

static void drbd_release_ee_lists(struct drbd_conf *mdev)
{
	int rr;

	rr = drbd_release_ee(mdev, &mdev->active_ee);
	if (rr)
		dev_err(DEV, "%d EEs in active list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->sync_ee);
	if (rr)
		dev_err(DEV, "%d EEs in sync list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->read_ee);
	if (rr)
		dev_err(DEV, "%d EEs in read list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->done_ee);
	if (rr)
		dev_err(DEV, "%d EEs in done list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->net_ee);
	if (rr)
		dev_err(DEV, "%d EEs in net list found!\n", rr);
}

/* caution. no locking. */
void drbd_delete_device(unsigned int minor)
{
	struct drbd_conf *mdev = minor_to_mdev(minor);

	if (!mdev)
		return;

	idr_remove(&mdev->tconn->volumes, mdev->vnr);
	idr_remove(&minors, minor);
	synchronize_rcu();

	/* paranoia asserts */
	D_ASSERT(mdev->open_cnt == 0);
	D_ASSERT(list_empty(&mdev->tconn->data.work.q));
	/* end paranoia asserts */

	del_gendisk(mdev->vdisk);

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (mdev->this_bdev)
		bdput(mdev->this_bdev);

	drbd_free_resources(mdev);

	drbd_release_ee_lists(mdev);

	lc_destroy(mdev->act_log);
	lc_destroy(mdev->resync);

	kfree(mdev->p_uuid);
	/* mdev->p_uuid = NULL; */

	/* cleanup the rest that has been
	 * allocated from drbd_new_device
	 * and actually free the mdev itself */
	drbd_free_mdev(mdev);
}

static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_conf *mdev;

	unregister_reboot_notifier(&drbd_notifier);

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	drbd_genl_unregister();

	idr_for_each_entry(&minors, mdev, i)
		drbd_delete_device(i);
	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&minors);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}

/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:	User data
 * @bdi_bits:		Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_conf *mdev = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(mdev)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (get_ldev(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(mdev);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	mdev->congestion_reason = reason;
	return r;
}
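
/*
 * The single-character congestion_reason codes set above: 'd' = DRBD has
 * frozen IO, 'b' = the backing device is congested, 'a' = backing device
 * and network both congested, 'n' = network congested only, '-' = not
 * congested at all.
 */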

static void drbd_init_workqueue(struct drbd_work_queue* wq)
{
	sema_init(&wq->s, 0);
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
}

struct drbd_tconn *conn_by_name(const char *name)
{
	struct drbd_tconn *tconn;

	if (!name || !name[0])
		return NULL;

	mutex_lock(&drbd_cfg_mutex);
	list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
		if (!strcmp(tconn->name, name))
			goto found;
	}
	tconn = NULL;
found:
	mutex_unlock(&drbd_cfg_mutex);
	return tconn;
}

static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

struct drbd_tconn *drbd_new_tconn(const char *name)
{
	struct drbd_tconn *tconn;

	tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
	if (!tconn)
		return NULL;

	tconn->name = kstrdup(name, GFP_KERNEL);
	if (!tconn->name)
		goto fail;

	if (drbd_alloc_socket(&tconn->data))
		goto fail;
	if (drbd_alloc_socket(&tconn->meta))
		goto fail;

	if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
		goto fail;

	if (!tl_init(tconn))
		goto fail;

	tconn->cstate = C_STANDALONE;
	mutex_init(&tconn->cstate_mutex);
	spin_lock_init(&tconn->req_lock);
	atomic_set(&tconn->net_cnt, 0);
	init_waitqueue_head(&tconn->net_cnt_wait);
	init_waitqueue_head(&tconn->ping_wait);
	idr_init(&tconn->volumes);

	drbd_init_workqueue(&tconn->data.work);
	mutex_init(&tconn->data.mutex);

	drbd_init_workqueue(&tconn->meta.work);
	mutex_init(&tconn->meta.mutex);

	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");

	tconn->res_opts = (struct res_opts) {
		{}, 0, /* cpu_mask */
		DRBD_ON_NO_DATA_DEF, /* on_no_data */
	};

	mutex_lock(&drbd_cfg_mutex);
	list_add_tail(&tconn->all_tconn, &drbd_tconns);
	mutex_unlock(&drbd_cfg_mutex);

	return tconn;

fail:
	tl_cleanup(tconn);
	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn);

	return NULL;
}

void drbd_free_tconn(struct drbd_tconn *tconn)
{
	list_del(&tconn->all_tconn);
	idr_destroy(&tconn->volumes);

	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	kfree(tconn);
}
2438
Philipp Reisner774b3052011-02-22 02:07:03 -05002439enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002440{
2441 struct drbd_conf *mdev;
2442 struct gendisk *disk;
2443 struct request_queue *q;
Philipp Reisner774b3052011-02-22 02:07:03 -05002444 int vnr_got = vnr;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002445 int minor_got = minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002446 enum drbd_ret_code err = ERR_NOMEM;
Philipp Reisner774b3052011-02-22 02:07:03 -05002447
2448 mdev = minor_to_mdev(minor);
2449 if (mdev)
2450 return ERR_MINOR_EXISTS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002451
2452 /* GFP_KERNEL, we are outside of all write-out paths */
2453 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2454 if (!mdev)
Philipp Reisner774b3052011-02-22 02:07:03 -05002455 return ERR_NOMEM;
2456
2457 mdev->tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002458 mdev->minor = minor;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002459 mdev->vnr = vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002460
2461 drbd_init_set_defaults(mdev);
2462
2463 q = blk_alloc_queue(GFP_KERNEL);
2464 if (!q)
2465 goto out_no_q;
2466 mdev->rq_queue = q;
2467 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002468
2469 disk = alloc_disk(1);
2470 if (!disk)
2471 goto out_no_disk;
2472 mdev->vdisk = disk;
2473
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002474 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002475
2476 disk->queue = q;
2477 disk->major = DRBD_MAJOR;
2478 disk->first_minor = minor;
2479 disk->fops = &drbd_ops;
2480 sprintf(disk->disk_name, "drbd%d", minor);
2481 disk->private_data = mdev;
2482
2483 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2484 /* we have no partitions. we contain only ourselves. */
2485 mdev->this_bdev->bd_contains = mdev->this_bdev;
2486
2487 q->backing_dev_info.congested_fn = drbd_congested;
2488 q->backing_dev_info.congested_data = mdev;
2489
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01002490 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002491	/* Setting max_hw_sectors to the intentionally odd value of 8 KiB here;
 2492	   this triggers a max_bio_size message upon first attach or connect. */
2493 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002494 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2495 blk_queue_merge_bvec(q, drbd_merge_bvec);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002496	q->queue_lock = &mdev->tconn->req_lock; /* we reuse the tconn's req_lock as this queue's lock */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002497
2498 mdev->md_io_page = alloc_page(GFP_KERNEL);
2499 if (!mdev->md_io_page)
2500 goto out_no_io_page;
2501
2502 if (drbd_bm_init(mdev))
2503 goto out_no_bitmap;
Andreas Gruenbacherdac13892011-01-21 17:18:39 +01002504 mdev->read_requests = RB_ROOT;
Andreas Gruenbacherde696712011-01-20 15:00:24 +01002505 mdev->write_requests = RB_ROOT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002506
Philipp Reisnerb411b362009-09-25 16:07:19 -07002507 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2508 if (!mdev->current_epoch)
2509 goto out_no_epoch;
2510
2511 INIT_LIST_HEAD(&mdev->current_epoch->list);
2512 mdev->epochs = 1;
2513
Lars Ellenberg8432b312011-03-08 16:11:16 +01002514 if (!idr_pre_get(&minors, GFP_KERNEL))
2515 goto out_no_minor_idr;
2516 if (idr_get_new_above(&minors, mdev, minor, &minor_got))
2517 goto out_no_minor_idr;
2518 if (minor_got != minor) {
2519 err = ERR_MINOR_EXISTS;
2520 drbd_msg_put_info("requested minor exists already");
2521 goto out_idr_remove_minor;
Lars Ellenberg569083c2011-03-07 09:49:02 +01002522 }
2523
Lars Ellenberg8432b312011-03-08 16:11:16 +01002524 if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
Lars Ellenberg569083c2011-03-07 09:49:02 +01002525 goto out_idr_remove_minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002526 if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
2527 goto out_idr_remove_minor;
2528 if (vnr_got != vnr) {
2529 err = ERR_INVALID_REQUEST;
2530 drbd_msg_put_info("requested volume exists already");
2531 goto out_idr_remove_vol;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002532 }
Philipp Reisner774b3052011-02-22 02:07:03 -05002533 add_disk(disk);
2534
Philipp Reisner2325eb62011-03-15 16:56:18 +01002535 /* inherit the connection state */
2536 mdev->state.conn = tconn->cstate;
2537 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2538 drbd_connected(vnr, mdev, tconn);
2539
Philipp Reisner774b3052011-02-22 02:07:03 -05002540 return NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002541
Lars Ellenberg569083c2011-03-07 09:49:02 +01002542out_idr_remove_vol:
2543 idr_remove(&tconn->volumes, vnr_got);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002544out_idr_remove_minor:
2545 idr_remove(&minors, minor_got);
Lars Ellenberg569083c2011-03-07 09:49:02 +01002546 synchronize_rcu();
Lars Ellenberg8432b312011-03-08 16:11:16 +01002547out_no_minor_idr:
Philipp Reisner81a5d602011-02-22 19:53:16 -05002548 kfree(mdev->current_epoch);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002549out_no_epoch:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002550 drbd_bm_cleanup(mdev);
2551out_no_bitmap:
2552 __free_page(mdev->md_io_page);
2553out_no_io_page:
2554 put_disk(disk);
2555out_no_disk:
2556 blk_cleanup_queue(q);
2557out_no_q:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002558 kfree(mdev);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002559 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002560}
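
/*
 * Illustrative sketch (a hypothetical helper, not part of this driver) of
 * the legacy two-step IDR pattern used above: idr_pre_get() preallocates
 * from blocking context, idr_get_new_above() inserts at the lowest free
 * id >= the requested one, so the granted id must be compared with the
 * requested one to detect "already in use".
 */
#if 0
static int example_idr_insert_exact(struct idr *idr, void *ptr, int want)
{
	int got;

	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;		/* preallocation failed */
	if (idr_get_new_above(idr, ptr, want, &got))
		return -ENOMEM;		/* insertion failed */
	if (got != want) {		/* "want" was already occupied */
		idr_remove(idr, got);
		return -EBUSY;
	}
	return 0;
}
#endif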
2561
 2562/* counterpart of conn_new_minor.
 2563 * last part of drbd_delete_device. */
2564void drbd_free_mdev(struct drbd_conf *mdev)
2565{
2566 kfree(mdev->current_epoch);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002567 if (mdev->bitmap) /* should no longer be there. */
2568 drbd_bm_cleanup(mdev);
2569 __free_page(mdev->md_io_page);
2570 put_disk(mdev->vdisk);
2571 blk_cleanup_queue(mdev->rq_queue);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002572 kfree(mdev);
2573}
2574
2575
2576int __init drbd_init(void)
2577{
2578 int err;
2579
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01002580 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002581 printk(KERN_ERR
Philipp Reisner81a5d602011-02-22 19:53:16 -05002582 "drbd: invalid minor_count (%d)\n", minor_count);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002583#ifdef MODULE
2584 return -EINVAL;
2585#else
2586 minor_count = 8;
2587#endif
2588 }
2589
Philipp Reisnerb411b362009-09-25 16:07:19 -07002590 err = register_blkdev(DRBD_MAJOR, "drbd");
2591 if (err) {
2592 printk(KERN_ERR
2593 "drbd: unable to register block device major %d\n",
2594 DRBD_MAJOR);
2595 return err;
2596 }
2597
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002598 err = drbd_genl_register();
2599 if (err) {
2600 printk(KERN_ERR "drbd: unable to register generic netlink family\n");
2601 goto fail;
2602 }
2603
2604
Philipp Reisnerb411b362009-09-25 16:07:19 -07002605 register_reboot_notifier(&drbd_notifier);
2606
2607 /*
2608 * allocate all necessary structs
2609 */
2610 err = -ENOMEM;
2611
2612 init_waitqueue_head(&drbd_pp_wait);
2613
2614 drbd_proc = NULL; /* play safe for drbd_cleanup */
Philipp Reisner81a5d602011-02-22 19:53:16 -05002615 idr_init(&minors);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002616
2617 err = drbd_create_mempools();
2618 if (err)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002619 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002620
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01002621 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002622 if (!drbd_proc) {
2623 printk(KERN_ERR "drbd: unable to register proc file\n");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002624 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002625 }
2626
2627 rwlock_init(&global_state_lock);
Philipp Reisner21114382011-01-19 12:26:59 +01002628 INIT_LIST_HEAD(&drbd_tconns);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002629
2630 printk(KERN_INFO "drbd: initialized. "
2631 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2632 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2633 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
2634 printk(KERN_INFO "drbd: registered as block device major %d\n",
2635 DRBD_MAJOR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002636
2637 return 0; /* Success! */
2638
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002639fail:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002640 drbd_cleanup();
2641 if (err == -ENOMEM)
2642 /* currently always the case */
2643 printk(KERN_ERR "drbd: ran out of memory\n");
2644 else
2645 printk(KERN_ERR "drbd: initialization failure\n");
2646 return err;
2647}
2648
2649void drbd_free_bc(struct drbd_backing_dev *ldev)
2650{
2651 if (ldev == NULL)
2652 return;
2653
Tejun Heoe525fd82010-11-13 11:55:17 +01002654 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2655 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002656
2657 kfree(ldev);
2658}
2659
Philipp Reisner360cc742011-02-08 14:29:53 +01002660void drbd_free_sock(struct drbd_tconn *tconn)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002661{
Philipp Reisner360cc742011-02-08 14:29:53 +01002662 if (tconn->data.socket) {
2663 mutex_lock(&tconn->data.mutex);
2664 kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
2665 sock_release(tconn->data.socket);
2666 tconn->data.socket = NULL;
2667 mutex_unlock(&tconn->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002668 }
Philipp Reisner360cc742011-02-08 14:29:53 +01002669 if (tconn->meta.socket) {
2670 mutex_lock(&tconn->meta.mutex);
2671 kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
2672 sock_release(tconn->meta.socket);
2673 tconn->meta.socket = NULL;
2674 mutex_unlock(&tconn->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002675 }
2676}
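
/*
 * The mutex/NULL discipline above pairs with senders that take the same
 * mutex and re-check the socket pointer before use; a hypothetical sketch
 * (the helper name is illustrative, not part of this driver):
 */
#if 0
static int example_send_on_data(struct drbd_tconn *tconn, void *buf, size_t size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	int err = -ENOTCONN;

	mutex_lock(&tconn->data.mutex);
	if (tconn->data.socket)		/* may have been torn down */
		err = kernel_sendmsg(tconn->data.socket, &msg, &iov, 1, size);
	mutex_unlock(&tconn->data.mutex);
	return err;
}
#endif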
2677
2678
2679void drbd_free_resources(struct drbd_conf *mdev)
2680{
Lars Ellenbergf3990022011-03-23 14:31:09 +01002681 crypto_free_hash(mdev->tconn->csums_tfm);
2682 mdev->tconn->csums_tfm = NULL;
2683 crypto_free_hash(mdev->tconn->verify_tfm);
2684 mdev->tconn->verify_tfm = NULL;
Philipp Reisnera0638452011-01-19 14:31:32 +01002685 crypto_free_hash(mdev->tconn->cram_hmac_tfm);
2686 mdev->tconn->cram_hmac_tfm = NULL;
2687 crypto_free_hash(mdev->tconn->integrity_w_tfm);
2688 mdev->tconn->integrity_w_tfm = NULL;
2689 crypto_free_hash(mdev->tconn->integrity_r_tfm);
2690 mdev->tconn->integrity_r_tfm = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002691
Philipp Reisner360cc742011-02-08 14:29:53 +01002692 drbd_free_sock(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002693
2694 __no_warn(local,
2695 drbd_free_bc(mdev->ldev);
2696 mdev->ldev = NULL;);
2697}
2698
2699/* meta data management */
2700
2701struct meta_data_on_disk {
2702 u64 la_size; /* last agreed size. */
2703 u64 uuid[UI_SIZE]; /* UUIDs. */
2704 u64 device_uuid;
2705 u64 reserved_u64_1;
2706 u32 flags; /* MDF */
2707 u32 magic;
2708 u32 md_size_sect;
2709 u32 al_offset; /* offset to this block */
2710 u32 al_nr_extents; /* important for restoring the AL */
Lars Ellenbergf3990022011-03-23 14:31:09 +01002711 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002712 u32 bm_offset; /* offset to the bitmap, from here */
2713 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
Philipp Reisner99432fc2011-05-20 16:39:13 +02002714 u32 la_peer_max_bio_size; /* last peer max_bio_size */
2715 u32 reserved_u32[3];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002716
2717} __packed;
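
/*
 * drbd_md_sync() below zeroes and writes exactly 512 bytes, so this on-disk
 * layout must never outgrow one sector. A hypothetical compile-time guard,
 * sketched for illustration only (not present in this version):
 */
#if 0
static inline void meta_data_on_disk_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) > 512);
}
#endif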
2718
2719/**
2720 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
2721 * @mdev: DRBD device.
2722 */
2723void drbd_md_sync(struct drbd_conf *mdev)
2724{
2725 struct meta_data_on_disk *buffer;
2726 sector_t sector;
2727 int i;
2728
Lars Ellenbergee15b032010-09-03 10:00:09 +02002729 del_timer(&mdev->md_sync_timer);
2730 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002731 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
2732 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002733
 2734	/* We use D_FAILED here, and not D_ATTACHING, because we try to write
 2735	 * metadata even if we detach due to a disk failure! */
2736 if (!get_ldev_if_state(mdev, D_FAILED))
2737 return;
2738
Philipp Reisnerb411b362009-09-25 16:07:19 -07002739 mutex_lock(&mdev->md_io_mutex);
2740 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
2741 memset(buffer, 0, 512);
2742
2743 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
2744 for (i = UI_CURRENT; i < UI_SIZE; i++)
2745 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
2746 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
2747 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
2748
2749 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
2750 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
2751 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
2752 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
2753 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
2754
2755 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002756 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002757
2758 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
2759 sector = mdev->ldev->md.md_offset;
2760
Andreas Gruenbacher3fbf4d22010-12-13 02:25:41 +01002761 if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002762 /* this was a try anyways ... */
2763 dev_err(DEV, "meta data update failed!\n");
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002764 drbd_chk_io_error(mdev, 1, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002765 }
2766
 2767	/* Update mdev->ldev->md.la_size_sect,
 2768	 * since we just wrote it to the on-disk metadata. */
2769 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
2770
2771 mutex_unlock(&mdev->md_io_mutex);
2772 put_ldev(mdev);
2773}
2774
2775/**
2776 * drbd_md_read() - Reads in the meta data super block
2777 * @mdev: DRBD device.
2778 * @bdev: Device from which the meta data should be read in.
2779 *
Andreas Gruenbacher116676c2010-12-08 13:33:11 +01002780 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
Philipp Reisnerb411b362009-09-25 16:07:19 -07002781 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
2782 */
2783int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
2784{
2785 struct meta_data_on_disk *buffer;
2786 int i, rv = NO_ERROR;
2787
2788 if (!get_ldev_if_state(mdev, D_ATTACHING))
2789 return ERR_IO_MD_DISK;
2790
Philipp Reisnerb411b362009-09-25 16:07:19 -07002791 mutex_lock(&mdev->md_io_mutex);
2792 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
2793
Andreas Gruenbacher3fbf4d22010-12-13 02:25:41 +01002794 if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002795 /* NOTE: can't do normal error processing here as this is
Philipp Reisnerb411b362009-09-25 16:07:19 -07002796 called BEFORE disk is attached */
2797 dev_err(DEV, "Error while reading metadata.\n");
2798 rv = ERR_IO_MD_DISK;
2799 goto err;
2800 }
2801
Andreas Gruenbachere7fad8a2011-01-11 13:54:02 +01002802 if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002803 dev_err(DEV, "Error while reading metadata, magic not found.\n");
2804 rv = ERR_MD_INVALID;
2805 goto err;
2806 }
2807 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
2808 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
2809 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
2810 rv = ERR_MD_INVALID;
2811 goto err;
2812 }
2813 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
2814 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
2815 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
2816 rv = ERR_MD_INVALID;
2817 goto err;
2818 }
2819 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
2820 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
2821 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
2822 rv = ERR_MD_INVALID;
2823 goto err;
2824 }
2825
2826 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
2827 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
2828 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
2829 rv = ERR_MD_INVALID;
2830 goto err;
2831 }
2832
2833 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
2834 for (i = UI_CURRENT; i < UI_SIZE; i++)
2835 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
2836 bdev->md.flags = be32_to_cpu(buffer->flags);
Lars Ellenbergf3990022011-03-23 14:31:09 +01002837 bdev->dc.al_extents = be32_to_cpu(buffer->al_nr_extents);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002838 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
2839
Philipp Reisner87eeee42011-01-19 14:16:30 +01002840 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002841 if (mdev->state.conn < C_CONNECTED) {
2842 int peer;
2843 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
2844 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
2845 mdev->peer_max_bio_size = peer;
2846 }
Philipp Reisner87eeee42011-01-19 14:16:30 +01002847 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002848
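	/* values below the allowed minimum fall back to the default;
	 * 7 and 127 are assumed to match DRBD_AL_EXTENTS_MIN/_DEF
	 * from drbd_limits.h */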
Lars Ellenbergf3990022011-03-23 14:31:09 +01002849 if (bdev->dc.al_extents < 7)
2850 bdev->dc.al_extents = 127;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002851
2852 err:
2853 mutex_unlock(&mdev->md_io_mutex);
2854 put_ldev(mdev);
2855
2856 return rv;
2857}
2858
2859/**
2860 * drbd_md_mark_dirty() - Mark meta data super block as dirty
2861 * @mdev: DRBD device.
2862 *
2863 * Call this function if you change anything that should be written to
2864 * the meta-data super block. This function sets MD_DIRTY, and starts a
 2865 * timer that ensures drbd_md_sync() is called within five seconds.
2866 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02002867#ifdef DEBUG
Lars Ellenbergee15b032010-09-03 10:00:09 +02002868void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
2869{
2870 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
2871 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
2872 mdev->last_md_mark_dirty.line = line;
2873 mdev->last_md_mark_dirty.func = func;
2874 }
2875}
2876#else
Philipp Reisnerb411b362009-09-25 16:07:19 -07002877void drbd_md_mark_dirty(struct drbd_conf *mdev)
2878{
Lars Ellenbergee15b032010-09-03 10:00:09 +02002879 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
Lars Ellenbergca0e6092010-10-14 15:01:21 +02002880 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002881}
Lars Ellenbergee15b032010-09-03 10:00:09 +02002882#endif
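
/*
 * Typical use of the dirty-flag machinery above, as a hypothetical sketch:
 * mutate the in-memory metadata, mark it dirty (arming the sync timer), and
 * optionally force the write-out immediately. The mutation is illustrative.
 */
#if 0
static void example_md_change(struct drbd_conf *mdev)
{
	mdev->ldev->md.device_uuid = 0x1234;	/* hypothetical mutation */
	drbd_md_mark_dirty(mdev);		/* sync within five seconds */
	drbd_md_sync(mdev);			/* ... or force it out now */
}
#endif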
Philipp Reisnerb411b362009-09-25 16:07:19 -07002883
2884static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
2885{
2886 int i;
2887
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002888 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002889 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002890}
2891
2892void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
2893{
2894 if (idx == UI_CURRENT) {
2895 if (mdev->state.role == R_PRIMARY)
2896 val |= 1;
2897 else
2898 val &= ~((u64)1);
2899
2900 drbd_set_ed_uuid(mdev, val);
2901 }
2902
2903 mdev->ldev->md.uuid[idx] = val;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002904 drbd_md_mark_dirty(mdev);
2905}
2906
2907
2908void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
2909{
2910 if (mdev->ldev->md.uuid[idx]) {
2911 drbd_uuid_move_history(mdev);
2912 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002913 }
2914 _drbd_uuid_set(mdev, idx, val);
2915}
2916
2917/**
2918 * drbd_uuid_new_current() - Creates a new current UUID
2919 * @mdev: DRBD device.
2920 *
2921 * Creates a new current UUID, and rotates the old current UUID into
2922 * the bitmap slot. Causes an incremental resync upon next connect.
2923 */
2924void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
2925{
2926 u64 val;
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002927 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002928
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002929 if (bm_uuid)
2930 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
2931
Philipp Reisnerb411b362009-09-25 16:07:19 -07002932 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002933
2934 get_random_bytes(&val, sizeof(u64));
2935 _drbd_uuid_set(mdev, UI_CURRENT, val);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002936 drbd_print_uuids(mdev, "new current UUID");
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02002937 /* get it to stable storage _now_ */
2938 drbd_md_sync(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002939}
2940
2941void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
2942{
2943 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
2944 return;
2945
2946 if (val == 0) {
2947 drbd_uuid_move_history(mdev);
2948 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2949 mdev->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002950 } else {
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002951 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
2952 if (bm_uuid)
2953 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002954
Lars Ellenberg62b0da32011-01-20 13:25:21 +01002955 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002956 }
2957 drbd_md_mark_dirty(mdev);
2958}
2959
2960/**
2961 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
2962 * @mdev: DRBD device.
2963 *
2964 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
2965 */
2966int drbd_bmio_set_n_write(struct drbd_conf *mdev)
2967{
2968 int rv = -EIO;
2969
2970 if (get_ldev_if_state(mdev, D_ATTACHING)) {
2971 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
2972 drbd_md_sync(mdev);
2973 drbd_bm_set_all(mdev);
2974
2975 rv = drbd_bm_write(mdev);
2976
2977 if (!rv) {
2978 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2979 drbd_md_sync(mdev);
2980 }
2981
2982 put_ldev(mdev);
2983 }
2984
2985 return rv;
2986}
2987
2988/**
2989 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
2990 * @mdev: DRBD device.
2991 *
2992 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
2993 */
2994int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
2995{
2996 int rv = -EIO;
2997
Philipp Reisner07782862010-08-31 12:00:50 +02002998 drbd_resume_al(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002999 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3000 drbd_bm_clear_all(mdev);
3001 rv = drbd_bm_write(mdev);
3002 put_ldev(mdev);
3003 }
3004
3005 return rv;
3006}
3007
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003008static int w_bitmap_io(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003009{
3010 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Philipp Reisner00d56942011-02-09 18:09:48 +01003011 struct drbd_conf *mdev = w->mdev;
Lars Ellenberg02851e92010-12-16 14:47:39 +01003012 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003013
3014 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3015
Lars Ellenberg02851e92010-12-16 14:47:39 +01003016 if (get_ldev(mdev)) {
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003017 drbd_bm_lock(mdev, work->why, work->flags);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003018 rv = work->io_fn(mdev);
3019 drbd_bm_unlock(mdev);
3020 put_ldev(mdev);
3021 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003022
Lars Ellenberg4738fa12011-02-21 13:20:55 +01003023 clear_bit_unlock(BITMAP_IO, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003024 wake_up(&mdev->misc_wait);
3025
3026 if (work->done)
3027 work->done(mdev, rv);
3028
3029 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3030 work->why = NULL;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003031 work->flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003032
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003033 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003034}
3035
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003036void drbd_ldev_destroy(struct drbd_conf *mdev)
3037{
3038 lc_destroy(mdev->resync);
3039 mdev->resync = NULL;
3040 lc_destroy(mdev->act_log);
3041 mdev->act_log = NULL;
3042 __no_warn(local,
3043 drbd_free_bc(mdev->ldev);
3044 mdev->ldev = NULL;);
3045
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003046 clear_bit(GO_DISKLESS, &mdev->flags);
3047}
3048
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003049static int w_go_diskless(struct drbd_work *w, int unused)
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003050{
Philipp Reisner00d56942011-02-09 18:09:48 +01003051 struct drbd_conf *mdev = w->mdev;
3052
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003053 D_ASSERT(mdev->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02003054 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3055 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003056 * the protected members anymore, though, so once put_ldev reaches zero
3057 * again, it will be safe to free them. */
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003058 drbd_force_state(mdev, NS(disk, D_DISKLESS));
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003059 return 0;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003060}
3061
3062void drbd_go_diskless(struct drbd_conf *mdev)
3063{
3064 D_ASSERT(mdev->state.disk == D_FAILED);
3065 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
Philipp Reisnere42325a2011-01-19 13:55:45 +01003066 drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003067}
3068
Philipp Reisnerb411b362009-09-25 16:07:19 -07003069/**
3070 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3071 * @mdev: DRBD device.
3072 * @io_fn: IO callback to be called when bitmap IO is possible
3073 * @done: callback to be called after the bitmap IO was performed
 3074 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags (enum bm_flag)
 3075 *
 3076 * While IO on the bitmap happens we freeze application IO, thus ensuring
 3077 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3078 * called from worker context. It MUST NOT be used while a previous such
3079 * work is still pending!
3080 */
3081void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3082 int (*io_fn)(struct drbd_conf *),
3083 void (*done)(struct drbd_conf *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003084 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003085{
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003086 D_ASSERT(current == mdev->tconn->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003087
3088 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3089 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3090 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3091 if (mdev->bm_io_work.why)
3092 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3093 why, mdev->bm_io_work.why);
3094
3095 mdev->bm_io_work.io_fn = io_fn;
3096 mdev->bm_io_work.done = done;
3097 mdev->bm_io_work.why = why;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003098 mdev->bm_io_work.flags = flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003099
Philipp Reisner87eeee42011-01-19 14:16:30 +01003100 spin_lock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003101 set_bit(BITMAP_IO, &mdev->flags);
3102 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
Philipp Reisner127b3172010-11-16 10:07:53 +01003103 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
Philipp Reisnere42325a2011-01-19 13:55:45 +01003104 drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003105 }
Philipp Reisner87eeee42011-01-19 14:16:30 +01003106 spin_unlock_irq(&mdev->tconn->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003107}
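
/*
 * Hypothetical usage sketch of the asynchronous variant above; the callback
 * and helper names are illustrative only. The io_fn runs from the worker
 * once ap_bio_cnt has drained to zero.
 */
#if 0
static void example_bm_io_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "example bitmap IO failed: %d\n", rv);
}

static void example_queue_clear_bm(struct drbd_conf *mdev)
{
	drbd_queue_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			     example_bm_io_done,
			     "clear_n_write from example", 0);
}
#endif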
3108
3109/**
3110 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3111 * @mdev: DRBD device.
3112 * @io_fn: IO callback to be called when bitmap IO is possible
 3113 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags (enum bm_flag)
 3114 *
 3115 * Freezes application IO (unless BM_LOCKED_SET_ALLOWED is set in @flags)
 3116 * while the actual IO operation runs. This function MAY NOT be called
 * from worker context.
3117 */
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003118int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
3119 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003120{
3121 int rv;
3122
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01003123 D_ASSERT(current != mdev->tconn->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003124
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003125 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3126 drbd_suspend_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003127
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003128 drbd_bm_lock(mdev, why, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003129 rv = io_fn(mdev);
3130 drbd_bm_unlock(mdev);
3131
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003132 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3133 drbd_resume_io(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003134
3135 return rv;
3136}
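
/*
 * Hypothetical usage sketch of the synchronous variant above, paired with
 * drbd_bmio_set_n_write(); with BM_LOCKED_SET_ALLOWED, application IO is
 * not suspended around the operation.
 */
#if 0
static int example_set_all_out_of_sync(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			      "set_n_write from example",
			      BM_LOCKED_SET_ALLOWED);
}
#endif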
3137
3138void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3139{
3140 if ((mdev->ldev->md.flags & flag) != flag) {
3141 drbd_md_mark_dirty(mdev);
3142 mdev->ldev->md.flags |= flag;
3143 }
3144}
3145
3146void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3147{
3148 if ((mdev->ldev->md.flags & flag) != 0) {
3149 drbd_md_mark_dirty(mdev);
3150 mdev->ldev->md.flags &= ~flag;
3151 }
3152}

 3153int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3154{
3155 return (bdev->md.flags & flag) != 0;
3156}
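
/*
 * A small round trip through the MDF flag helpers above, as a hypothetical
 * sketch; MDF_FULL_SYNC is used the same way by drbd_bmio_set_n_write().
 */
#if 0
static void example_flag_roundtrip(struct drbd_conf *mdev)
{
	drbd_md_set_flag(mdev, MDF_FULL_SYNC);	/* marks md dirty if changed */
	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC))
		drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
	drbd_md_sync(mdev);			/* persist the final state */
}
#endif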
3157
3158static void md_sync_timer_fn(unsigned long data)
3159{
3160 struct drbd_conf *mdev = (struct drbd_conf *) data;
3161
Philipp Reisnere42325a2011-01-19 13:55:45 +01003162 drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003163}
3164
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003165static int w_md_sync(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003166{
Philipp Reisner00d56942011-02-09 18:09:48 +01003167 struct drbd_conf *mdev = w->mdev;
3168
Philipp Reisnerb411b362009-09-25 16:07:19 -07003169 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02003170#ifdef DEBUG
3171 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3172 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3173#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07003174 drbd_md_sync(mdev);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003175 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003176}
3177
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003178const char *cmdname(enum drbd_packet cmd)
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003179{
3180 /* THINK may need to become several global tables
3181 * when we want to support more than
3182 * one PRO_VERSION */
3183 static const char *cmdnames[] = {
3184 [P_DATA] = "Data",
3185 [P_DATA_REPLY] = "DataReply",
3186 [P_RS_DATA_REPLY] = "RSDataReply",
3187 [P_BARRIER] = "Barrier",
3188 [P_BITMAP] = "ReportBitMap",
3189 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3190 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3191 [P_UNPLUG_REMOTE] = "UnplugRemote",
3192 [P_DATA_REQUEST] = "DataRequest",
3193 [P_RS_DATA_REQUEST] = "RSDataRequest",
3194 [P_SYNC_PARAM] = "SyncParam",
3195 [P_SYNC_PARAM89] = "SyncParam89",
3196 [P_PROTOCOL] = "ReportProtocol",
3197 [P_UUIDS] = "ReportUUIDs",
3198 [P_SIZES] = "ReportSizes",
3199 [P_STATE] = "ReportState",
3200 [P_SYNC_UUID] = "ReportSyncUUID",
3201 [P_AUTH_CHALLENGE] = "AuthChallenge",
3202 [P_AUTH_RESPONSE] = "AuthResponse",
3203 [P_PING] = "Ping",
3204 [P_PING_ACK] = "PingAck",
3205 [P_RECV_ACK] = "RecvAck",
3206 [P_WRITE_ACK] = "WriteAck",
3207 [P_RS_WRITE_ACK] = "RSWriteAck",
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003208 [P_DISCARD_WRITE] = "DiscardWrite",
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003209 [P_NEG_ACK] = "NegAck",
3210 [P_NEG_DREPLY] = "NegDReply",
3211 [P_NEG_RS_DREPLY] = "NegRSDReply",
3212 [P_BARRIER_ACK] = "BarrierAck",
3213 [P_STATE_CHG_REQ] = "StateChgRequest",
3214 [P_STATE_CHG_REPLY] = "StateChgReply",
3215 [P_OV_REQUEST] = "OVRequest",
3216 [P_OV_REPLY] = "OVReply",
3217 [P_OV_RESULT] = "OVResult",
3218 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3219 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3220 [P_COMPRESSED_BITMAP] = "CBitmap",
3221 [P_DELAY_PROBE] = "DelayProbe",
3222 [P_OUT_OF_SYNC] = "OutOfSync",
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003223 [P_RETRY_WRITE] = "RetryWrite",
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003224 };
3225
Andreas Gruenbachere5d6f332011-03-28 16:44:40 +02003226 if (cmd == P_INITIAL_META)
3227 return "InitialMeta";
3228 if (cmd == P_INITIAL_DATA)
3229 return "InitialData";
Andreas Gruenbacher60381782011-03-28 17:05:50 +02003230 if (cmd == P_CONNECTION_FEATURES)
3231 return "ConnectionFeatures";
Andreas Gruenbacher6e849ce2011-03-14 17:27:45 +01003232 if (cmd >= ARRAY_SIZE(cmdnames))
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003233 return "Unknown";
3234 return cmdnames[cmd];
3235}
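
/*
 * Hypothetical logging sketch for cmdname(); values past the end of the
 * table fall back to "Unknown" above, so the call is safe for any value
 * the peer might send.
 */
#if 0
static void example_log_packet(struct drbd_conf *mdev, enum drbd_packet cmd)
{
	dev_info(DEV, "received %s (0x%x)\n", cmdname(cmd), cmd);
}
#endif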
3236
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003237/**
3238 * drbd_wait_misc - wait for a request to make progress
3239 * @mdev: device associated with the request
3240 * @i: the struct drbd_interval embedded in struct drbd_request or
3241 * struct drbd_peer_request
3242 */
3243int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
3244{
3245 struct net_conf *net_conf = mdev->tconn->net_conf;
3246 DEFINE_WAIT(wait);
3247 long timeout;
3248
3249 if (!net_conf)
3250 return -ETIMEDOUT;
3251 timeout = MAX_SCHEDULE_TIMEOUT;
3252 if (net_conf->ko_count)
3253 timeout = net_conf->timeout * HZ / 10 * net_conf->ko_count;
3254
 3255	/* Indicate that mdev->misc_wait should be woken up on progress. */
3256 i->waiting = true;
3257 prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
3258 spin_unlock_irq(&mdev->tconn->req_lock);
3259 timeout = schedule_timeout(timeout);
3260 finish_wait(&mdev->misc_wait, &wait);
3261 spin_lock_irq(&mdev->tconn->req_lock);
3262 if (!timeout || mdev->state.conn < C_CONNECTED)
3263 return -ETIMEDOUT;
3264 if (signal_pending(current))
3265 return -ERESTARTSYS;
3266 return 0;
3267}
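
/*
 * drbd_wait_misc() is entered with tconn->req_lock held and returns with it
 * re-acquired; a hypothetical caller sketch:
 */
#if 0
static int example_wait_for_interval(struct drbd_conf *mdev,
				     struct drbd_interval *i)
{
	int err;

	spin_lock_irq(&mdev->tconn->req_lock);
	err = drbd_wait_misc(mdev, i);	/* drops and retakes the lock */
	spin_unlock_irq(&mdev->tconn->req_lock);
	return err;
}
#endif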
3268
Philipp Reisnerb411b362009-09-25 16:07:19 -07003269#ifdef CONFIG_DRBD_FAULT_INJECTION
3270/* Fault insertion support including random number generator shamelessly
3271 * stolen from kernel/rcutorture.c */
3272struct fault_random_state {
3273 unsigned long state;
3274 unsigned long count;
3275};
3276
3277#define FAULT_RANDOM_MULT 39916801 /* prime */
3278#define FAULT_RANDOM_ADD 479001701 /* prime */
3279#define FAULT_RANDOM_REFRESH 10000
3280
3281/*
3282 * Crude but fast random-number generator. Uses a linear congruential
3283 * generator, with occasional help from get_random_bytes().
3284 */
3285static unsigned long
3286_drbd_fault_random(struct fault_random_state *rsp)
3287{
3288 long refresh;
3289
Roel Kluin49829ea2009-12-15 22:55:44 +01003290 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003291 get_random_bytes(&refresh, sizeof(refresh));
3292 rsp->state += refresh;
3293 rsp->count = FAULT_RANDOM_REFRESH;
3294 }
3295 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3296 return swahw32(rsp->state);
3297}
3298
3299static char *
3300_drbd_fault_str(unsigned int type) {
3301 static char *_faults[] = {
3302 [DRBD_FAULT_MD_WR] = "Meta-data write",
3303 [DRBD_FAULT_MD_RD] = "Meta-data read",
3304 [DRBD_FAULT_RS_WR] = "Resync write",
3305 [DRBD_FAULT_RS_RD] = "Resync read",
3306 [DRBD_FAULT_DT_WR] = "Data write",
3307 [DRBD_FAULT_DT_RD] = "Data read",
3308 [DRBD_FAULT_DT_RA] = "Data read ahead",
3309 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02003310 [DRBD_FAULT_AL_EE] = "EE allocation",
3311 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003312 };
3313
3314 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3315}
3316
3317unsigned int
3318_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3319{
3320 static struct fault_random_state rrs = {0, 0};
3321
3322 unsigned int ret = (
3323 (fault_devs == 0 ||
3324 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3325 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3326
3327 if (ret) {
3328 fault_count++;
3329
Lars Ellenberg73835062010-05-27 11:51:56 +02003330 if (__ratelimit(&drbd_ratelimit_state))
Philipp Reisnerb411b362009-09-25 16:07:19 -07003331 dev_warn(DEV, "***Simulating %s failure\n",
3332 _drbd_fault_str(type));
3333 }
3334
3335 return ret;
3336}
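
/*
 * Hypothetical sketch of a fault-injection call site; the submit helper is
 * illustrative, and the driver is assumed to wrap such checks behind a
 * drbd_insert_fault() helper (see drbd_int.h) so they compile away without
 * CONFIG_DRBD_FAULT_INJECTION.
 */
#if 0
static int example_submit_md_write(struct drbd_conf *mdev, struct bio *bio)
{
	if (_drbd_insert_fault(mdev, DRBD_FAULT_MD_WR)) {
		bio_endio(bio, -EIO);	/* simulate a failed write */
		return -EIO;
	}
	submit_bio(WRITE, bio);
	return 0;
}
#endif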
3337#endif
3338
3339const char *drbd_buildtag(void)
3340{
 3341	/* DRBD built from external sources carries a reference to the
 3342	   git hash of the source code here. */
3343
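	/* the leading NUL keeps the string empty by default; it is patched
	 * to 'b' below, yielding "built-in", when no srcversion exists */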
3344 static char buildtag[38] = "\0uilt-in";
3345
3346 if (buildtag[0] == 0) {
3347#ifdef CONFIG_MODULES
3348 if (THIS_MODULE != NULL)
3349 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3350 else
3351#endif
3352 buildtag[0] = 'b';
3353 }
3354
3355 return buildtag;
3356}
3357
3358module_init(drbd_init)
3359module_exit(drbd_cleanup)
3360
Philipp Reisnerb411b362009-09-25 16:07:19 -07003361EXPORT_SYMBOL(drbd_conn_str);
3362EXPORT_SYMBOL(drbd_role_str);
3363EXPORT_SYMBOL(drbd_disk_str);
3364EXPORT_SYMBOL(drbd_set_st_err_str);