/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
28
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not as a module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif
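
/*
 * Usage sketch (illustrative only; the exact fault-type bits are defined
 * elsewhere in drbd_int.h, and the semantics of fault_devs == 0 meaning
 * "all minors" is an assumption of this example):
 *
 *	modprobe drbd fault_rate=2 enable_faults=0xff fault_devs=0
 *
 * would inject each enabled fault class with roughly 2% probability.
 */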

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details;       /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;  /* list of struct drbd_tconn */
DEFINE_MUTEX(drbd_cfg_mutex);

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
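
/*
 * Illustrative sketch of how such a page chain is popped (comment only,
 * assuming drbd_pp_lock is held); the page's private field serves as the
 * "next" pointer of the list described above:
 *
 *	struct page *page = drbd_pp_pool;
 *	if (page) {
 *		drbd_pp_pool = (struct page *)page_private(page);
 *		set_page_private(page, 0);
 *		drbd_pp_vacant--;
 *	}
 */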

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}
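
/*
 * Example call site (sketch): metadata I/O needs exactly one page, so a
 * single-vector bio from the dedicated bio_set (or a plain bio_alloc()
 * before the set exists) suffices. Field names here are assumptions of
 * this illustration:
 *
 *	bio = bio_alloc_drbd(GFP_NOIO);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio->bi_bdev = mdev->ldev->md_bdev;
 */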
171
Philipp Reisnerb411b362009-09-25 16:07:19 -0700172#ifdef __CHECKER__
173/* When checking with sparse, and this is an inline function, sparse will
174 give tons of false positives. When this is a real functions sparse works.
175 */
176int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
177{
178 int io_allowed;
179
180 atomic_inc(&mdev->local_cnt);
181 io_allowed = (mdev->state.disk >= mins);
182 if (!io_allowed) {
183 if (atomic_dec_and_test(&mdev->local_cnt))
184 wake_up(&mdev->misc_wait);
185 }
186 return io_allowed;
187}
188
189#endif
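
/*
 * Typical use of the local-disk refcount (sketch, mirroring callers
 * further down in this file):
 *
 *	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
 *		... safely dereference mdev->ldev ...
 *		put_ldev(mdev);
 *	}
 *
 * put_ldev() wakes mdev->misc_wait once the last reference is dropped,
 * matching the wake_up() in the failure path above.
 */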

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
 * attached.
 */
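
/*
 * Illustrative traversal (sketch; the real walkers below run under
 * tconn->req_lock): epochs are visited oldest to newest via ->next,
 * and the requests of each epoch hang off its ->requests list:
 *
 *	struct drbd_tl_epoch *b;
 *	struct drbd_request *req;
 *
 *	for (b = tconn->oldest_tle; b; b = b->next)
 *		list_for_each_entry(req, &b->requests, tl_requests)
 *			... inspect req ...
 */
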
static int tl_init(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);

	return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn:	DRBD connection.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = tconn->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);

	b = tconn->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruption of the
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	   */
	list_del_init(&b->requests);
	mdev = b->w.mdev;

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(tconn, b);
		if (nob)
			tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == RESEND) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(b->w.mdev);
					set_bit(CREATE_BARRIER, &b->w.mdev->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
				if (tmp != NULL)
					conn_err(tconn, "ASSERT FAILED tmp == NULL\n");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}
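
/*
 * Aside (illustrative): _req_mod() returns an MR_WRITE/MR_READ bit mask,
 * so, assuming MR_WRITE == (1 << MR_WRITE_SHIFT) and likewise for reads,
 * a return value of (MR_WRITE | MR_READ) bumps both n_writes and n_reads
 * by exactly one in the loop above.
 */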


/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	struct list_head *le, *tle;
	struct drbd_request *r;
	int vnr;

	spin_lock_irq(&tconn->req_lock);

	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);
	}

	/* ensure bit indicating barrier is required is clear */
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		clear_bit(CREATE_BARRIER, &mdev->flags);

	spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
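
/*
 * Thread lifecycle sketch (illustrative; mirrors how the connection
 * threads are wired up elsewhere in this driver):
 *
 *	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
 *	if (!drbd_thread_start(&tconn->worker))
 *		return -ENOMEM;
 *	...
 *	_drbd_thread_stop(&tconn->worker, false, true);   (blocks until exit)
 */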

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender  :
		task == tconn->worker.task   ? &tconn->worker   : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	int vnr = 0;
	struct drbd_conf *mdev;

	mdev = idr_get_next(&tconn->volumes, &vnr);
	if (!mdev)
		return -1;
	return mdev_to_minor(mdev);
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	BUILD_BUG_ON(sizeof(struct p_header80) != sizeof(struct p_header95));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
	return sizeof(struct p_header80);
}

static void prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
}

static void prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
}

static void prepare_header(struct drbd_tconn *tconn, int vnr, struct p_header *h,
			   enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 95)
		prepare_header95(&h->h95, cmd, size);
	else
		prepare_header80(&h->h80, cmd, size);
}
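
/*
 * Wire layout sketch of the two header variants dispatched above (field
 * widths follow from the cpu_to_be*() conversions):
 *
 *	p_header80:  be32 magic | be16 command | be16 length
 *	p_header95:  be16 magic | be16 command | be32 length
 *
 * Both come out at the same total size, which drbd_header_size() asserts.
 */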

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	mutex_lock(&sock->mutex);
	if (!sock->socket) {
		mutex_unlock(&sock->mutex);
		return NULL;
	}
	return sock->sbuf;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	prepare_header(tconn, vnr, sock->sbuf, cmd,
		       header_size - sizeof(struct p_header) + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(tconn, 0, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
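
/*
 * Locking discipline (sketch of a typical caller, compare drbd_send_ping()
 * below): *_prepare_command() takes sock->mutex and hands back the send
 * buffer; *_send_command() transmits and drops the mutex again:
 *
 *	p = drbd_prepare_command(mdev, sock);
 *	if (!p)
 *		return -EIO;
 *	p->some_field = cpu_to_be32(...);
 *	return drbd_send_command(mdev, sock, CMD, sizeof(*p), NULL, 0);
 */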

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, sizeof(struct p_header), NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, sizeof(struct p_header), NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->tconn->net_conf->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		p->rate = cpu_to_be32(mdev->ldev->dc.resync_rate);
		p->c_plan_ahead = cpu_to_be32(mdev->ldev->dc.c_plan_ahead);
		p->c_delay_target = cpu_to_be32(mdev->ldev->dc.c_delay_target);
		p->c_fill_target = cpu_to_be32(mdev->ldev->dc.c_fill_target);
		p->c_max_rate = cpu_to_be32(mdev->ldev->dc.c_max_rate);
		put_ldev(mdev);
	} else {
		p->rate = cpu_to_be32(DRBD_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, mdev->tconn->net_conf->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	int size, cf;

	if (tconn->net_conf->dry_run && tconn->agreed_pro_version < 92) {
		conn_err(tconn, "--dry-run is not supported by peer\n");
		return -EOPNOTSUPP;
	}

	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(tconn->net_conf->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(tconn->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(tconn->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(tconn->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(tconn->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);
	cf = 0;
	if (tconn->net_conf->want_lose)
		cf |= CF_WANT_LOSE;
	if (tconn->net_conf->dry_run)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);
	return conn_send_command(tconn, sock, P_PROTOCOL, size, NULL, 0);
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
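
/*
 * Encoding byte layout implied by the helpers above (sketch):
 *
 *	bit  7   : "start" flag - the bitmap region starts with a set bit
 *	bits 6-4 : number of pad bits at the end of the code string
 *	bits 3-0 : enum drbd_bitmap_code, e.g. RLE_VLI_Bits
 */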

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits;

	/* may we use this feature? */
	if ((mdev->tconn->net_conf->use_rle == 0) ||
	    (mdev->tconn->agreed_pro_version < 90))
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
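
/*
 * Worked example (sketch): a region of 1000 clear bits, then 24 set bits,
 * then only clear bits is emitted as the run lengths 1000, 24, ... (each
 * VLI encoded), with the start flag clear since the first run describes
 * clear bits. 1024+ plain bits collapse into a few code bytes, easily
 * passing the plain_bits < (len << 3) profitability check above.
 */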
1196
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001197/**
1198 * send_bitmap_rle_or_plain
1199 *
1200 * Return 0 when done, 1 when another iteration is needed, and a negative error
1201 * code upon failure.
1202 */
1203static int
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001204send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001205{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001206 struct drbd_socket *sock = &mdev->tconn->data;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001207 unsigned int header_size = drbd_header_size(mdev->tconn);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001208 struct p_compressed_bm *p = sock->sbuf;
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001209 int len, err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001210
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001211 len = fill_bitmap_rle_bits(mdev, p, DRBD_SOCKET_BUFFER_SIZE - sizeof(*p) /* FIXME */, c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001212 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001213 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001214
1215 if (len) {
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001216 dcbp_set_code(p, RLE_VLI_Bits);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001217 err = __send_command(mdev->tconn, mdev->vnr, sock,
1218 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1219 NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001220 c->packets[0]++;
1221 c->bytes[0] += sizeof(*p) + len;
1222
1223 if (c->bit_offset >= c->bm_bits)
1224 len = 0; /* DONE */
1225 } else {
1226 /* was not compressible.
1227 * send a buffer full of plain text bits instead. */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001228 unsigned int data_size;
1229 unsigned long num_words;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001230 struct p_header *h = sock->sbuf;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001231
1232 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
1233 num_words = min_t(size_t, data_size / sizeof(unsigned long),
1234 c->bm_words - c->word_offset);
1235 len = num_words * sizeof(unsigned long);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001236 if (len)
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001237 drbd_bm_get_lel(mdev, c->word_offset, num_words,
1238 (unsigned long *)h->payload);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001239 err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP,
1240 sizeof(*h) + len, NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001241 c->word_offset += num_words;
1242 c->bit_offset = c->word_offset * BITS_PER_LONG;
1243
1244 c->packets[1]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001245 c->bytes[1] += header_size + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001246
1247 if (c->bit_offset > c->bm_bits)
1248 c->bit_offset = c->bm_bits;
1249 }
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001250 if (!err) {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001251 if (len == 0) {
1252 INFO_bm_xfer_stats(mdev, "send", c);
1253 return 0;
1254 } else
1255 return 1;
1256 }
1257 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001258}
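
/*
 * Bookkeeping note on the above: c->packets[0] / c->bytes[0] count the
 * compressed packets, c->packets[1] / c->bytes[1] the plain ones;
 * INFO_bm_xfer_stats() reports both once the final packet went out.
 */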
1259
1260/* See the comment at receive_bitmap() */
Andreas Gruenbacher058820c2011-03-22 16:03:43 +01001261static int _drbd_send_bitmap(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001262{
1263 struct bm_xfer_ctx c;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001264 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001265
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001266 if (!expect(mdev->bitmap))
1267 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001268
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269 if (get_ldev(mdev)) {
1270 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1271 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
1272 drbd_bm_set_all(mdev);
1273 if (drbd_bm_write(mdev)) {
1274 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1275 * but otherwise process as per normal - need to tell other
1276 * side that a full resync is required! */
1277 dev_err(DEV, "Failed to write bitmap to disk!\n");
1278 } else {
1279 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
1280 drbd_md_sync(mdev);
1281 }
1282 }
1283 put_ldev(mdev);
1284 }
1285
1286 c = (struct bm_xfer_ctx) {
1287 .bm_bits = drbd_bm_bits(mdev),
1288 .bm_words = drbd_bm_words(mdev),
1289 };
1290
1291 do {
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001292 err = send_bitmap_rle_or_plain(mdev, &c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001293 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001294
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001295 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001296}
1297
1298int drbd_send_bitmap(struct drbd_conf *mdev)
1299{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001300 struct drbd_socket *sock = &mdev->tconn->data;
1301 int err = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001302
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001303 mutex_lock(&sock->mutex);
1304 if (sock->socket)
1305 err = !_drbd_send_bitmap(mdev);
1306 mutex_unlock(&sock->mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001307 return err;
1308}
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001309
Andreas Gruenbacherd4e67d72011-03-16 01:25:28 +01001310void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001311{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001312 struct drbd_socket *sock;
1313 struct p_barrier_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001314
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001315 if (mdev->state.conn < C_CONNECTED)
1316 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001317
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001318 sock = &mdev->tconn->meta;
1319 p = drbd_prepare_command(mdev, sock);
1320 if (!p)
1321 return;
1322 p->barrier = barrier_nr;
1323 p->set_size = cpu_to_be32(set_size);
1324 drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001325}
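
/*
 * Note the locking pattern used by this and the following senders:
 * drbd_prepare_command() takes sock->mutex and hands back a pointer
 * into the preallocated send buffer (or NULL when the connection is
 * down), and drbd_send_command() drops the mutex again after the send.
 * Callers that use __send_command() directly, like drbd_send_dblock()
 * below, must unlock sock->mutex themselves.
 */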
1326
1327/**
1328 * _drbd_send_ack() - Sends an ack packet
1329 * @mdev: DRBD device.
1330 * @cmd: Packet command code.
1331 * @sector: sector, needs to be in big endian byte order
1332 * @blksize:	size in bytes, needs to be in big endian byte order
1333 * @block_id: Id, big endian byte order
1334 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001335static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1336 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001338 struct drbd_socket *sock;
1339 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001340
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001341 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001342 return -EIO;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001343
1344 sock = &mdev->tconn->meta;
1345 p = drbd_prepare_command(mdev, sock);
1346 if (!p)
1347 return -EIO;
1348 p->sector = sector;
1349 p->block_id = block_id;
1350 p->blksize = blksize;
1351 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1352 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001353}
1354
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001355/* dp->sector and dp->block_id already/still in network byte order,
1356 * data_size is payload size according to dp->head,
1357 * and may need to be corrected for digest size. */
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001358void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1359 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001360{
Philipp Reisnera0638452011-01-19 14:31:32 +01001361 data_size -= (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1362 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001363 _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1364 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001365}
1366
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001367void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1368 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001369{
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001370 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001371}
1372
1373/**
1374 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001375 * @mdev: DRBD device
1376 * @cmd: packet command code
1377 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001378 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001379int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001380 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001381{
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001382 return _drbd_send_ack(mdev, cmd,
1383 cpu_to_be64(peer_req->i.sector),
1384 cpu_to_be32(peer_req->i.size),
1385 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001386}
1387
1388/* This function misuses the block_id field to signal if the blocks
1389 * are in sync or not. */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001390int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001391 sector_t sector, int blksize, u64 block_id)
1392{
Andreas Gruenbacherfa79abd2011-03-16 01:31:39 +01001393 return _drbd_send_ack(mdev, cmd,
1394 cpu_to_be64(sector),
1395 cpu_to_be32(blksize),
1396 cpu_to_be64(block_id));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001397}
1398
1399int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1400 sector_t sector, int size, u64 block_id)
1401{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001402 struct drbd_socket *sock;
1403 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001404
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001405 sock = &mdev->tconn->data;
1406 p = drbd_prepare_command(mdev, sock);
1407 if (!p)
1408 return -EIO;
1409 p->sector = cpu_to_be64(sector);
1410 p->block_id = block_id;
1411 p->blksize = cpu_to_be32(size);
1412 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001413}
1414
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001415int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1416 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001417{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001418 struct drbd_socket *sock;
1419 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001420
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001421 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001422
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001423 sock = &mdev->tconn->data;
1424 p = drbd_prepare_command(mdev, sock);
1425 if (!p)
1426 return -EIO;
1427 p->sector = cpu_to_be64(sector);
1428 p->block_id = ID_SYNCER /* unused */;
1429 p->blksize = cpu_to_be32(size);
1430 return drbd_send_command(mdev, sock, cmd, sizeof(*p),
1431 digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001432}
1433
1434int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1435{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001436 struct drbd_socket *sock;
1437 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001438
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001439 sock = &mdev->tconn->data;
1440 p = drbd_prepare_command(mdev, sock);
1441 if (!p)
1442 return -EIO;
1443 p->sector = cpu_to_be64(sector);
1444 p->block_id = ID_SYNCER /* unused */;
1445 p->blksize = cpu_to_be32(size);
1446 return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001447}
1448
1449/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001450 * returns false if we should retry,
1451 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001452 */
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001453static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001454{
1455 int drop_it;
1456 /* long elapsed = (long)(jiffies - mdev->last_received); */
1457
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001458 drop_it = tconn->meta.socket == sock
1459 || !tconn->asender.task
1460 || get_t_state(&tconn->asender) != RUNNING
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001461 || tconn->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001462
1463 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001464 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001465
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001466 drop_it = !--tconn->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001467 if (!drop_it) {
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001468 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1469 current->comm, current->pid, tconn->ko_count);
1470 request_ping(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001471 }
1472
1473	return drop_it; /* && (mdev->state == R_PRIMARY) */
1474}
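
/*
 * Illustrative timing, assuming a send timeout of 6 seconds and
 * ko-count = 7 (hypothetical configuration values): each expired
 * sendmsg brings us here, where we decrement ko_count and ask the
 * asender to ping the peer; only after seven consecutive timeouts,
 * i.e. roughly 42 seconds of a completely stalled peer, does
 * ko_count reach zero and the connection gets dropped.
 */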
1475
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001476static void drbd_update_congested(struct drbd_tconn *tconn)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001477{
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001478 struct sock *sk = tconn->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001479 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001480 set_bit(NET_CONGESTED, &tconn->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001481}
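
/*
 * Example with assumed numbers: with sk_sndbuf = 128 KiB, NET_CONGESTED
 * gets set once more than 4/5 of that (about 102 KiB) is queued but not
 * yet sent.  drbd_congested() further down reports the bit to the
 * writeback code; the send path clears it again once it made progress.
 */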
1482
Philipp Reisnerb411b362009-09-25 16:07:19 -07001483/* The idea of sendpage seems to be to put some kind of reference
1484 * to the page into the skb, and to hand it over to the NIC. In
1485 * this process get_page() gets called.
1486 *
1487 * As soon as the page was really sent over the network put_page()
1488 * gets called by some part of the network layer. [ NIC driver? ]
1489 *
1490 * [ get_page() / put_page() increment/decrement the count. If count
1491 * reaches 0 the page will be freed. ]
1492 *
1493 * This works nicely with pages from FSs.
1494 * But this means that in protocol A we might signal IO completion too early!
1495 *
1496 * In order not to corrupt data during a resync we must make sure
1497 * that we do not reuse our own buffer pages (EEs) too early, therefore
1498 * we have the net_ee list.
1499 *
1500 * XFS still seems to have problems: it submits pages with page_count == 0!
1501 * As a workaround, we disable sendpage on pages
1502 * with page_count == 0 or PageSlab.
1503 */
1504static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001505 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001506{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001507 struct socket *socket;
1508 void *addr;
1509 int err;
1510
1511 socket = mdev->tconn->data.socket;
1512 addr = kmap(page) + offset;
1513 err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001514 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001515 if (!err)
1516 mdev->send_cnt += size >> 9;
1517 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001518}
1519
1520static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001521 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001522{
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001523 struct socket *socket = mdev->tconn->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001524 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001525 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001526 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001527
1528 /* e.g. XFS meta- & log-data is in slab pages, which have a
1529 * page_count of 0 and/or have PageSlab() set.
1530 * we cannot use send_page for those, as that does get_page();
1531 * put_page(); and would cause either a VM_BUG directly, or
1532	 * __page_cache_release of a page that would actually still be referenced
1533 * by someone, leading to some obscure delayed Oops somewhere else. */
1534 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001535 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001536
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001537 msg_flags |= MSG_NOSIGNAL;
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001538 drbd_update_congested(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001539 set_fs(KERNEL_DS);
1540 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001541 int sent;
1542
1543 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001544 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001545 if (sent == -EAGAIN) {
1546 if (we_should_drop_the_connection(mdev->tconn, socket))
1547 break;
1548 continue;
1549 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001550 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1551 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001552 if (sent < 0)
1553 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001554 break;
1555 }
1556 len -= sent;
1557 offset += sent;
1558 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
1559 set_fs(oldfs);
Philipp Reisner01a311a2011-02-07 14:30:33 +01001560 clear_bit(NET_CONGESTED, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001561
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001562 if (len == 0) {
1563 err = 0;
1564 mdev->send_cnt += size >> 9;
1565 }
1566 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001567}
1568
1569static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1570{
1571 struct bio_vec *bvec;
1572 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001573 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001574 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001575 int err;
1576
1577 err = _drbd_no_send_page(mdev, bvec->bv_page,
1578 bvec->bv_offset, bvec->bv_len,
1579 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1580 if (err)
1581 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001582 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001583 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001584}
1585
1586static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1587{
1588 struct bio_vec *bvec;
1589 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001590 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001591 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001592 int err;
1593
1594 err = _drbd_send_page(mdev, bvec->bv_page,
1595 bvec->bv_offset, bvec->bv_len,
1596 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1597 if (err)
1598 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001599 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001600 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001601}
1602
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001603static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1604 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001605{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001606 struct page *page = peer_req->pages;
1607 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001608 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001609
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001610 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001611 page_chain_for_each(page) {
1612 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001613
1614 err = _drbd_send_page(mdev, page, 0, l,
1615 page_chain_next(page) ? MSG_MORE : 0);
1616 if (err)
1617 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001618 len -= l;
1619 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001620 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001621}
1622
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001623static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1624{
Philipp Reisner31890f42011-01-19 14:12:51 +01001625 if (mdev->tconn->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001626 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001627 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1628 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1629 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1630 else
Jens Axboe721a9602011-03-09 11:56:30 +01001631 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001632}
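
/*
 * Example: a bio carrying REQ_SYNC|REQ_FUA is sent to a protocol >= 95
 * peer as DP_RW_SYNC|DP_FUA; toward an older peer only the REQ_SYNC
 * bit survives, because the other flags did not exist on the wire yet.
 */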
1633
Philipp Reisnerb411b362009-09-25 16:07:19 -07001634/* Used to send write requests
1635 * R_PRIMARY -> Peer (P_DATA)
1636 */
1637int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1638{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001639 struct drbd_socket *sock;
1640 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001641 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001642 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001643 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001644
Philipp Reisnera0638452011-01-19 14:31:32 +01001645 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1646 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001647
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001648 sock = &mdev->tconn->data;
1649 p = drbd_prepare_command(mdev, sock);
1650 if (!p)
1651 return -EIO;
1652 p->sector = cpu_to_be64(req->i.sector);
1653 p->block_id = (unsigned long)req;
1654 p->seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001655 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001656 if (mdev->state.conn >= C_SYNC_SOURCE &&
1657 mdev->state.conn <= C_PAUSED_SYNC_T)
1658 dp_flags |= DP_MAY_SET_IN_SYNC;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001659 p->dp_flags = cpu_to_be32(dp_flags);
1660 if (dgs)
1661 drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, p + 1);
1662 err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001663 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001664 /* For protocol A, we have to memcpy the payload into
1665 * socket buffers, as we may complete right away
1666		 * as soon as we have handed it over to tcp, at which point the data
1667 * pages may become invalid.
1668 *
1669		 * With data integrity enabled, we copy it as well, so we can be
1670		 * sure that even if the bio pages are still being modified, that
1671		 * won't change the data on the wire; thus, if the digest checks
1672		 * out ok after sending on this side but does not fit on the
1673		 * receiving side, we have certainly detected corruption elsewhere.
1674 */
Philipp Reisner89e58e72011-01-19 13:12:45 +01001675 if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs)
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001676 err = _drbd_send_bio(mdev, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001677 else
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001678 err = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001679
1680		/* double-check the digest; sometimes buffers have been modified in flight. */
1681 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001682			/* 64 bytes, 512 bits, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001683 * currently supported in kernel crypto. */
1684 unsigned char digest[64];
Philipp Reisnera0638452011-01-19 14:31:32 +01001685 drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001686 if (memcmp(p + 1, digest, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001687 dev_warn(DEV,
1688 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001689 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001690 }
1691 } /* else if (dgs > 64) {
1692 ... Be noisy about digest too large ...
1693 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001694 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001695 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001696
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001697 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001698}
1699
1700/* answer packet, used to send data back for read requests:
1701 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1702 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1703 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001704int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001705 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001706{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001707 struct drbd_socket *sock;
1708 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001709 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001710 int dgs;
1711
Philipp Reisnera0638452011-01-19 14:31:32 +01001712 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1713 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001714
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001715 sock = &mdev->tconn->data;
1716 p = drbd_prepare_command(mdev, sock);
1717 if (!p)
1718 return -EIO;
1719 p->sector = cpu_to_be64(peer_req->i.sector);
1720 p->block_id = peer_req->block_id;
1721 p->seq_num = 0; /* unused */
1722 if (dgs)
1723 drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, p + 1);
1724 err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001725 if (!err)
1726 err = _drbd_send_zc_ee(mdev, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001727 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001728
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001729 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001730}
1731
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01001732int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001733{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001734 struct drbd_socket *sock;
1735 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001736
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001737 sock = &mdev->tconn->data;
1738 p = drbd_prepare_command(mdev, sock);
1739 if (!p)
1740 return -EIO;
1741 p->sector = cpu_to_be64(req->i.sector);
1742 p->blksize = cpu_to_be32(req->i.size);
1743 return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001744}
1745
Philipp Reisnerb411b362009-09-25 16:07:19 -07001746/*
1747 drbd_send distinguishes two cases:
1748
1749 Packets sent via the data socket "sock"
1750 and packets sent via the meta data socket "msock"
1751
1752 sock msock
1753 -----------------+-------------------------+------------------------------
1754 timeout conf.timeout / 2 conf.timeout / 2
1755 timeout action send a ping via msock Abort communication
1756 and close all sockets
1757*/
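
/*
 * Example of the split above: a P_DATA write request travels on "sock",
 * while an ack such as P_RECV_ACK returns on "msock".  If "sock" stalls
 * for conf.timeout / 2 we probe the peer with a ping on "msock"; only
 * if "msock" itself times out do we abort and close both sockets.
 */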
1758
1759/*
1760 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1761 */
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001762int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001763 void *buf, size_t size, unsigned msg_flags)
1764{
1765 struct kvec iov;
1766 struct msghdr msg;
1767 int rv, sent = 0;
1768
1769 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001770 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001771
1772 /* THINK if (signal_pending) return ... ? */
1773
1774 iov.iov_base = buf;
1775 iov.iov_len = size;
1776
1777 msg.msg_name = NULL;
1778 msg.msg_namelen = 0;
1779 msg.msg_control = NULL;
1780 msg.msg_controllen = 0;
1781 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1782
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001783 if (sock == tconn->data.socket) {
1784 tconn->ko_count = tconn->net_conf->ko_count;
1785 drbd_update_congested(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001786 }
1787 do {
1788 /* STRANGE
1789	 * tcp_sendmsg does _not_ use its size parameter at all?
1790 *
1791 * -EAGAIN on timeout, -EINTR on signal.
1792 */
1793/* THINK
1794 * do we need to block DRBD_SIG if sock == &meta.socket ??
1795 * otherwise wake_asender() might interrupt some send_*Ack !
1796 */
1797 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1798 if (rv == -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001799 if (we_should_drop_the_connection(tconn, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001800 break;
1801 else
1802 continue;
1803 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001804 if (rv == -EINTR) {
1805 flush_signals(current);
1806 rv = 0;
1807 }
1808 if (rv < 0)
1809 break;
1810 sent += rv;
1811 iov.iov_base += rv;
1812 iov.iov_len -= rv;
1813 } while (sent < size);
1814
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001815 if (sock == tconn->data.socket)
1816 clear_bit(NET_CONGESTED, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001817
1818 if (rv <= 0) {
1819 if (rv != -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001820 conn_err(tconn, "%s_sendmsg returned %d\n",
1821 sock == tconn->meta.socket ? "msock" : "sock",
1822 rv);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001823 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001824 } else
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001825 conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001826 }
1827
1828 return sent;
1829}
1830
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001831/**
1832 * drbd_send_all() - Send an entire buffer
1833 *
1834 * Returns 0 upon success and a negative error value otherwise.
1835 */
1836int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1837 size_t size, unsigned msg_flags)
1838{
1839 int err;
1840
1841 err = drbd_send(tconn, sock, buffer, size, msg_flags);
1842 if (err < 0)
1843 return err;
1844 if (err != size)
1845 return -EIO;
1846 return 0;
1847}
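
/*
 * Usage sketch (illustrative only, never called; the function name is
 * made up for the example): sending a fixed-size buffer over the data
 * socket while its mutex is held, as drbd_send() requires.
 */
#if 0
static int example_send_buffer(struct drbd_tconn *tconn, void *buf, size_t len)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = tconn->data.socket ?
		drbd_send_all(tconn, tconn->data.socket, buf, len, 0) : -EIO;
	mutex_unlock(&tconn->data.mutex);
	return err;
}
#endif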
1848
Philipp Reisnerb411b362009-09-25 16:07:19 -07001849static int drbd_open(struct block_device *bdev, fmode_t mode)
1850{
1851 struct drbd_conf *mdev = bdev->bd_disk->private_data;
1852 unsigned long flags;
1853 int rv = 0;
1854
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001855 mutex_lock(&drbd_main_mutex);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001856 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001857 /* to have a stable mdev->state.role
1858 * and no race with updating open_cnt */
1859
1860 if (mdev->state.role != R_PRIMARY) {
1861 if (mode & FMODE_WRITE)
1862 rv = -EROFS;
1863 else if (!allow_oos)
1864 rv = -EMEDIUMTYPE;
1865 }
1866
1867 if (!rv)
1868 mdev->open_cnt++;
Philipp Reisner87eeee42011-01-19 14:16:30 +01001869 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001870 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001871
1872 return rv;
1873}
1874
1875static int drbd_release(struct gendisk *gd, fmode_t mode)
1876{
1877 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001878 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001879 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001880 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001881 return 0;
1882}
1883
Philipp Reisnerb411b362009-09-25 16:07:19 -07001884static void drbd_set_defaults(struct drbd_conf *mdev)
1885{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001886 /* Beware! The actual layout differs
1887 * between big endian and little endian */
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02001888 mdev->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001889 { .role = R_SECONDARY,
1890 .peer = R_UNKNOWN,
1891 .conn = C_STANDALONE,
1892 .disk = D_DISKLESS,
1893 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001894 } };
1895}
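
/*
 * The named-bitfield initializer above is what keeps this safe despite
 * the layout warning: we never assume where .role or .conn sit within
 * the word, we only name the fields and let the compiler place them.
 */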
1896
1897void drbd_init_set_defaults(struct drbd_conf *mdev)
1898{
1899 /* the memset(,0,) did most of this.
1900 * note: only assignments, no allocation in here */
1901
1902 drbd_set_defaults(mdev);
1903
Philipp Reisnerb411b362009-09-25 16:07:19 -07001904 atomic_set(&mdev->ap_bio_cnt, 0);
1905 atomic_set(&mdev->ap_pending_cnt, 0);
1906 atomic_set(&mdev->rs_pending_cnt, 0);
1907 atomic_set(&mdev->unacked_cnt, 0);
1908 atomic_set(&mdev->local_cnt, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02001909 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02001910 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001911 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02001912 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001913
1914 mutex_init(&mdev->md_io_mutex);
Philipp Reisner8410da8f02011-02-11 20:11:10 +01001915 mutex_init(&mdev->own_state_mutex);
1916 mdev->state_mutex = &mdev->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001917
Philipp Reisnerb411b362009-09-25 16:07:19 -07001918 spin_lock_init(&mdev->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001919 spin_lock_init(&mdev->peer_seq_lock);
1920 spin_lock_init(&mdev->epoch_lock);
1921
1922 INIT_LIST_HEAD(&mdev->active_ee);
1923 INIT_LIST_HEAD(&mdev->sync_ee);
1924 INIT_LIST_HEAD(&mdev->done_ee);
1925 INIT_LIST_HEAD(&mdev->read_ee);
1926 INIT_LIST_HEAD(&mdev->net_ee);
1927 INIT_LIST_HEAD(&mdev->resync_reads);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001928 INIT_LIST_HEAD(&mdev->resync_work.list);
1929 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001930 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001931 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02001932 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001933 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02001934
Philipp Reisner794abb72010-12-27 11:51:23 +01001935 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001936 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001937 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001938 mdev->md_sync_work.cb = w_md_sync;
1939 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001940 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001941
1942 mdev->resync_work.mdev = mdev;
1943 mdev->unplug_work.mdev = mdev;
1944 mdev->go_diskless.mdev = mdev;
1945 mdev->md_sync_work.mdev = mdev;
1946 mdev->bm_io_work.w.mdev = mdev;
1947 mdev->start_resync_work.mdev = mdev;
1948
Philipp Reisnerb411b362009-09-25 16:07:19 -07001949 init_timer(&mdev->resync_timer);
1950 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01001951 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001952 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001953 mdev->resync_timer.function = resync_timer_fn;
1954 mdev->resync_timer.data = (unsigned long) mdev;
1955 mdev->md_sync_timer.function = md_sync_timer_fn;
1956 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001957 mdev->start_resync_timer.function = start_resync_timer_fn;
1958 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001959 mdev->request_timer.function = request_timer_fn;
1960 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001961
1962 init_waitqueue_head(&mdev->misc_wait);
1963 init_waitqueue_head(&mdev->state_wait);
1964 init_waitqueue_head(&mdev->ee_wait);
1965 init_waitqueue_head(&mdev->al_wait);
1966 init_waitqueue_head(&mdev->seq_wait);
1967
Philipp Reisnerfd340c12011-01-19 16:57:39 +01001968 /* mdev->tconn->agreed_pro_version gets initialized in drbd_connect() */
Philipp Reisner2451fc32010-08-24 13:43:11 +02001969 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001970 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001971 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1972 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001973}
1974
1975void drbd_mdev_cleanup(struct drbd_conf *mdev)
1976{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001977 int i;
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001978 if (mdev->tconn->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001979 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001980 mdev->tconn->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001981
1982 /* no need to lock it, I'm the only thread alive */
1983 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
1984 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
1985 mdev->al_writ_cnt =
1986 mdev->bm_writ_cnt =
1987 mdev->read_cnt =
1988 mdev->recv_cnt =
1989 mdev->send_cnt =
1990 mdev->writ_cnt =
1991 mdev->p_size =
1992 mdev->rs_start =
1993 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001994 mdev->rs_failed = 0;
1995 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001996 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001997 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1998 mdev->rs_mark_left[i] = 0;
1999 mdev->rs_mark_time[i] = 0;
2000 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01002001 D_ASSERT(mdev->tconn->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002002
2003 drbd_set_my_capacity(mdev, 0);
2004 if (mdev->bitmap) {
2005 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01002006 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002007 drbd_bm_cleanup(mdev);
2008 }
2009
2010 drbd_free_resources(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02002011 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002012
2013 /*
2014	 * currently we call drbd_init_ee only on module load, so
2015	 * we may call drbd_release_ee only on module unload!
2016 */
2017 D_ASSERT(list_empty(&mdev->active_ee));
2018 D_ASSERT(list_empty(&mdev->sync_ee));
2019 D_ASSERT(list_empty(&mdev->done_ee));
2020 D_ASSERT(list_empty(&mdev->read_ee));
2021 D_ASSERT(list_empty(&mdev->net_ee));
2022 D_ASSERT(list_empty(&mdev->resync_reads));
Philipp Reisnere42325a2011-01-19 13:55:45 +01002023 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2024 D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002025 D_ASSERT(list_empty(&mdev->resync_work.list));
2026 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002027 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01002028
2029 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002030}
2031
2032
2033static void drbd_destroy_mempools(void)
2034{
2035 struct page *page;
2036
2037 while (drbd_pp_pool) {
2038 page = drbd_pp_pool;
2039 drbd_pp_pool = (struct page *)page_private(page);
2040 __free_page(page);
2041 drbd_pp_vacant--;
2042 }
2043
2044 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2045
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002046 if (drbd_md_io_bio_set)
2047 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg35abf592011-02-23 12:39:46 +01002048 if (drbd_md_io_page_pool)
2049 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002050 if (drbd_ee_mempool)
2051 mempool_destroy(drbd_ee_mempool);
2052 if (drbd_request_mempool)
2053 mempool_destroy(drbd_request_mempool);
2054 if (drbd_ee_cache)
2055 kmem_cache_destroy(drbd_ee_cache);
2056 if (drbd_request_cache)
2057 kmem_cache_destroy(drbd_request_cache);
2058 if (drbd_bm_ext_cache)
2059 kmem_cache_destroy(drbd_bm_ext_cache);
2060 if (drbd_al_ext_cache)
2061 kmem_cache_destroy(drbd_al_ext_cache);
2062
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002063 drbd_md_io_bio_set = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002064 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002065 drbd_ee_mempool = NULL;
2066 drbd_request_mempool = NULL;
2067 drbd_ee_cache = NULL;
2068 drbd_request_cache = NULL;
2069 drbd_bm_ext_cache = NULL;
2070 drbd_al_ext_cache = NULL;
2071
2072 return;
2073}
2074
2075static int drbd_create_mempools(void)
2076{
2077 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002078 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002079 int i;
2080
2081 /* prepare our caches and mempools */
2082 drbd_request_mempool = NULL;
2083 drbd_ee_cache = NULL;
2084 drbd_request_cache = NULL;
2085 drbd_bm_ext_cache = NULL;
2086 drbd_al_ext_cache = NULL;
2087 drbd_pp_pool = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002088 drbd_md_io_page_pool = NULL;
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002089 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002090
2091 /* caches */
2092 drbd_request_cache = kmem_cache_create(
2093 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2094 if (drbd_request_cache == NULL)
2095 goto Enomem;
2096
2097 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002098 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002099 if (drbd_ee_cache == NULL)
2100 goto Enomem;
2101
2102 drbd_bm_ext_cache = kmem_cache_create(
2103 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2104 if (drbd_bm_ext_cache == NULL)
2105 goto Enomem;
2106
2107 drbd_al_ext_cache = kmem_cache_create(
2108 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2109 if (drbd_al_ext_cache == NULL)
2110 goto Enomem;
2111
2112 /* mempools */
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002113 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2114 if (drbd_md_io_bio_set == NULL)
2115 goto Enomem;
2116
Lars Ellenberg35abf592011-02-23 12:39:46 +01002117 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2118 if (drbd_md_io_page_pool == NULL)
2119 goto Enomem;
2120
Philipp Reisnerb411b362009-09-25 16:07:19 -07002121 drbd_request_mempool = mempool_create(number,
2122 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2123 if (drbd_request_mempool == NULL)
2124 goto Enomem;
2125
2126 drbd_ee_mempool = mempool_create(number,
2127 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002128 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002129 goto Enomem;
2130
2131 /* drbd's page pool */
2132 spin_lock_init(&drbd_pp_lock);
2133
2134 for (i = 0; i < number; i++) {
2135 page = alloc_page(GFP_HIGHUSER);
2136 if (!page)
2137 goto Enomem;
2138 set_page_private(page, (unsigned long)drbd_pp_pool);
2139 drbd_pp_pool = page;
2140 }
2141 drbd_pp_vacant = number;
2142
2143 return 0;
2144
2145Enomem:
2146 drbd_destroy_mempools(); /* in case we allocated some */
2147 return -ENOMEM;
2148}
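
/*
 * Sizing example (assuming 4 KiB pages and a DRBD_MAX_BIO_SIZE of
 * 1 MiB; see drbd_int.h for the real values): "number" works out to
 * 256 * minor_count, i.e. the page pool holds one maximally sized bio
 * worth of pages per configured minor.
 */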
2149
2150static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2151 void *unused)
2152{
2153 /* just so we have it. you never know what interesting things we
2154 * might want to do here some day...
2155 */
2156
2157 return NOTIFY_DONE;
2158}
2159
2160static struct notifier_block drbd_notifier = {
2161 .notifier_call = drbd_notify_sys,
2162};
2163
2164static void drbd_release_ee_lists(struct drbd_conf *mdev)
2165{
2166 int rr;
2167
2168 rr = drbd_release_ee(mdev, &mdev->active_ee);
2169 if (rr)
2170 dev_err(DEV, "%d EEs in active list found!\n", rr);
2171
2172 rr = drbd_release_ee(mdev, &mdev->sync_ee);
2173 if (rr)
2174 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2175
2176 rr = drbd_release_ee(mdev, &mdev->read_ee);
2177 if (rr)
2178 dev_err(DEV, "%d EEs in read list found!\n", rr);
2179
2180 rr = drbd_release_ee(mdev, &mdev->done_ee);
2181 if (rr)
2182 dev_err(DEV, "%d EEs in done list found!\n", rr);
2183
2184 rr = drbd_release_ee(mdev, &mdev->net_ee);
2185 if (rr)
2186 dev_err(DEV, "%d EEs in net list found!\n", rr);
2187}
2188
Philipp Reisner774b3052011-02-22 02:07:03 -05002189/* caution. no locking. */
2190void drbd_delete_device(unsigned int minor)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002191{
2192 struct drbd_conf *mdev = minor_to_mdev(minor);
2193
2194 if (!mdev)
2195 return;
2196
Lars Ellenberg569083c2011-03-07 09:49:02 +01002197 idr_remove(&mdev->tconn->volumes, mdev->vnr);
2198 idr_remove(&minors, minor);
2199 synchronize_rcu();
Philipp Reisner774b3052011-02-22 02:07:03 -05002200
Philipp Reisnerb411b362009-09-25 16:07:19 -07002201 /* paranoia asserts */
Andreas Gruenbacher70dc65e2010-12-21 14:46:57 +01002202 D_ASSERT(mdev->open_cnt == 0);
Philipp Reisnere42325a2011-01-19 13:55:45 +01002203 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002204 /* end paranoia asserts */
2205
2206 del_gendisk(mdev->vdisk);
2207
2208 /* cleanup stuff that may have been allocated during
2209 * device (re-)configuration or state changes */
2210
2211 if (mdev->this_bdev)
2212 bdput(mdev->this_bdev);
2213
2214 drbd_free_resources(mdev);
2215
2216 drbd_release_ee_lists(mdev);
2217
Philipp Reisnerb411b362009-09-25 16:07:19 -07002218 lc_destroy(mdev->act_log);
2219 lc_destroy(mdev->resync);
2220
2221 kfree(mdev->p_uuid);
2222 /* mdev->p_uuid = NULL; */
2223
Philipp Reisnerb411b362009-09-25 16:07:19 -07002224 /* cleanup the rest that has been
2225 * allocated from drbd_new_device
2226 * and actually free the mdev itself */
2227 drbd_free_mdev(mdev);
2228}
2229
2230static void drbd_cleanup(void)
2231{
2232 unsigned int i;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002233 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002234
2235 unregister_reboot_notifier(&drbd_notifier);
2236
Lars Ellenberg17a93f32010-11-24 10:37:35 +01002237 /* first remove proc,
2238	 * drbdsetup uses its presence to detect
2239 * whether DRBD is loaded.
2240	 * If we got stuck in proc removal after
2241	 * netlink had already been deregistered,
2242 * some drbdsetup commands may wait forever
2243 * for an answer.
2244 */
2245 if (drbd_proc)
2246 remove_proc_entry("drbd", NULL);
2247
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002248 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002249
Philipp Reisner81a5d602011-02-22 19:53:16 -05002250 idr_for_each_entry(&minors, mdev, i)
2251 drbd_delete_device(i);
2252 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002253 unregister_blkdev(DRBD_MAJOR, "drbd");
2254
Philipp Reisner81a5d602011-02-22 19:53:16 -05002255 idr_destroy(&minors);
2256
Philipp Reisnerb411b362009-09-25 16:07:19 -07002257 printk(KERN_INFO "drbd: module cleanup done.\n");
2258}
2259
2260/**
2261 * drbd_congested() - Callback for pdflush
2262 * @congested_data: User data
2263 * @bdi_bits: Bits pdflush is currently interested in
2264 *
2265 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2266 */
2267static int drbd_congested(void *congested_data, int bdi_bits)
2268{
2269 struct drbd_conf *mdev = congested_data;
2270 struct request_queue *q;
2271 char reason = '-';
2272 int r = 0;
2273
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01002274 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002275 /* DRBD has frozen IO */
2276 r = bdi_bits;
2277 reason = 'd';
2278 goto out;
2279 }
2280
2281 if (get_ldev(mdev)) {
2282 q = bdev_get_queue(mdev->ldev->backing_bdev);
2283 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2284 put_ldev(mdev);
2285 if (r)
2286 reason = 'b';
2287 }
2288
Philipp Reisner01a311a2011-02-07 14:30:33 +01002289 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002290 r |= (1 << BDI_async_congested);
2291 reason = reason == 'b' ? 'a' : 'n';
2292 }
2293
2294out:
2295 mdev->congestion_reason = reason;
2296 return r;
2297}
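
/*
 * The single character stored in congestion_reason above decodes as:
 * 'd' = DRBD itself froze IO, 'b' = backing device congested,
 * 'n' = network congested, 'a' = backing device and network both
 * congested, '-' = not congested.
 */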
2298
Philipp Reisner6699b652011-02-09 11:10:24 +01002299static void drbd_init_workqueue(struct drbd_work_queue* wq)
2300{
2301 sema_init(&wq->s, 0);
2302 spin_lock_init(&wq->q_lock);
2303 INIT_LIST_HEAD(&wq->q);
2304}
2305
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002306struct drbd_tconn *conn_by_name(const char *name)
2307{
2308 struct drbd_tconn *tconn;
2309
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002310 if (!name || !name[0])
2311 return NULL;
2312
Lars Ellenberg543cc102011-03-10 22:18:18 +01002313 mutex_lock(&drbd_cfg_mutex);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002314 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2315 if (!strcmp(tconn->name, name))
2316 goto found;
2317 }
2318 tconn = NULL;
2319found:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002320 mutex_unlock(&drbd_cfg_mutex);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002321 return tconn;
2322}
2323
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002324static int drbd_alloc_socket(struct drbd_socket *socket)
2325{
2326 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2327 if (!socket->rbuf)
2328 return -ENOMEM;
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002329 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2330 if (!socket->sbuf)
2331 return -ENOMEM;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002332 return 0;
2333}
2334
2335static void drbd_free_socket(struct drbd_socket *socket)
2336{
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002337 free_page((unsigned long) socket->sbuf);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002338 free_page((unsigned long) socket->rbuf);
2339}
2340
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002341struct drbd_tconn *drbd_new_tconn(const char *name)
Philipp Reisner21114382011-01-19 12:26:59 +01002342{
2343 struct drbd_tconn *tconn;
2344
2345 tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
2346 if (!tconn)
2347 return NULL;
2348
2349 tconn->name = kstrdup(name, GFP_KERNEL);
2350 if (!tconn->name)
2351 goto fail;
2352
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002353 if (drbd_alloc_socket(&tconn->data))
2354 goto fail;
2355 if (drbd_alloc_socket(&tconn->meta))
2356 goto fail;
2357
Philipp Reisner774b3052011-02-22 02:07:03 -05002358 if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
2359 goto fail;
2360
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002361 if (!tl_init(tconn))
2362 goto fail;
2363
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01002364 tconn->cstate = C_STANDALONE;
Philipp Reisner8410da8f02011-02-11 20:11:10 +01002365 mutex_init(&tconn->cstate_mutex);
Philipp Reisner6699b652011-02-09 11:10:24 +01002366 spin_lock_init(&tconn->req_lock);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002367 atomic_set(&tconn->net_cnt, 0);
2368 init_waitqueue_head(&tconn->net_cnt_wait);
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01002369 init_waitqueue_head(&tconn->ping_wait);
Philipp Reisner062e8792011-02-08 11:09:18 +01002370 idr_init(&tconn->volumes);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002371
Philipp Reisner6699b652011-02-09 11:10:24 +01002372 drbd_init_workqueue(&tconn->data.work);
2373 mutex_init(&tconn->data.mutex);
2374
2375 drbd_init_workqueue(&tconn->meta.work);
2376 mutex_init(&tconn->meta.mutex);
2377
Philipp Reisner392c8802011-02-09 10:33:31 +01002378 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
2379 drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
2380 drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
2381
Lars Ellenbergf3990022011-03-23 14:31:09 +01002382 tconn->res_opts = (struct res_opts) {
2383 {}, 0, /* cpu_mask */
2384 DRBD_ON_NO_DATA_DEF, /* on_no_data */
2385 };
2386
Lars Ellenberg543cc102011-03-10 22:18:18 +01002387 mutex_lock(&drbd_cfg_mutex);
2388 list_add_tail(&tconn->all_tconn, &drbd_tconns);
2389 mutex_unlock(&drbd_cfg_mutex);
Philipp Reisner21114382011-01-19 12:26:59 +01002390
2391 return tconn;
2392
2393fail:
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002394 tl_cleanup(tconn);
Philipp Reisner774b3052011-02-22 02:07:03 -05002395 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002396 drbd_free_socket(&tconn->meta);
2397 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002398 kfree(tconn->name);
2399 kfree(tconn);
2400
2401 return NULL;
2402}
2403
2404void drbd_free_tconn(struct drbd_tconn *tconn)
2405{
Philipp Reisner21114382011-01-19 12:26:59 +01002406 list_del(&tconn->all_tconn);
Philipp Reisner062e8792011-02-08 11:09:18 +01002407 idr_destroy(&tconn->volumes);
Philipp Reisner21114382011-01-19 12:26:59 +01002408
Philipp Reisner774b3052011-02-22 02:07:03 -05002409 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002410 drbd_free_socket(&tconn->meta);
2411 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002412 kfree(tconn->name);
Philipp Reisnerb42a70a2011-01-27 10:55:20 +01002413 kfree(tconn->int_dig_in);
2414 kfree(tconn->int_dig_vv);
Philipp Reisner21114382011-01-19 12:26:59 +01002415 kfree(tconn);
2416}
2417
Philipp Reisner774b3052011-02-22 02:07:03 -05002418enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002419{
2420 struct drbd_conf *mdev;
2421 struct gendisk *disk;
2422 struct request_queue *q;
Philipp Reisner774b3052011-02-22 02:07:03 -05002423 int vnr_got = vnr;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002424 int minor_got = minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002425 enum drbd_ret_code err = ERR_NOMEM;
Philipp Reisner774b3052011-02-22 02:07:03 -05002426
2427 mdev = minor_to_mdev(minor);
2428 if (mdev)
2429 return ERR_MINOR_EXISTS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002430
2431 /* GFP_KERNEL, we are outside of all write-out paths */
2432 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2433 if (!mdev)
Philipp Reisner774b3052011-02-22 02:07:03 -05002434 return ERR_NOMEM;
2435
2436 mdev->tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002437 mdev->minor = minor;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002438 mdev->vnr = vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002439
2440 drbd_init_set_defaults(mdev);
2441
2442 q = blk_alloc_queue(GFP_KERNEL);
2443 if (!q)
2444 goto out_no_q;
2445 mdev->rq_queue = q;
2446 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002447
2448 disk = alloc_disk(1);
2449 if (!disk)
2450 goto out_no_disk;
2451 mdev->vdisk = disk;
2452
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002453 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002454
2455 disk->queue = q;
2456 disk->major = DRBD_MAJOR;
2457 disk->first_minor = minor;
2458 disk->fops = &drbd_ops;
2459 sprintf(disk->disk_name, "drbd%d", minor);
2460 disk->private_data = mdev;
2461
2462 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2463 /* we have no partitions. we contain only ourselves. */
2464 mdev->this_bdev->bd_contains = mdev->this_bdev;
2465
2466 q->backing_dev_info.congested_fn = drbd_congested;
2467 q->backing_dev_info.congested_data = mdev;
2468
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01002469 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002470	/* Setting the max_hw_sectors to an odd value of 8 KiB here;
2471	   this triggers a max_bio_size message upon first attach or connect */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */

	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	mdev->read_requests = RB_ROOT;
	mdev->write_requests = RB_ROOT;

	mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!mdev->current_epoch)
		goto out_no_epoch;

	INIT_LIST_HEAD(&mdev->current_epoch->list);
	mdev->epochs = 1;

	if (!idr_pre_get(&minors, GFP_KERNEL))
		goto out_no_minor_idr;
	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
		goto out_no_minor_idr;
	if (minor_got != minor) {
		err = ERR_MINOR_EXISTS;
		drbd_msg_put_info("requested minor exists already");
		goto out_idr_remove_minor;
	}

	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
		goto out_idr_remove_minor;
	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
		goto out_idr_remove_minor;
	if (vnr_got != vnr) {
		err = ERR_INVALID_REQUEST;
		drbd_msg_put_info("requested volume exists already");
		goto out_idr_remove_vol;
	}
	add_disk(disk);

	/* inherit the connection state */
	mdev->state.conn = tconn->cstate;
	if (mdev->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(vnr, mdev, tconn);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
	idr_remove(&minors, minor_got);
	synchronize_rcu();
out_no_minor_idr:
	kfree(mdev->current_epoch);
out_no_epoch:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kfree(mdev);
	return err;
}
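
/*
 * Illustrative sketch only, not part of the driver proper: how a caller,
 * e.g. a netlink handler, might use conn_new_minor().  The helper name
 * and the hard-coded minor/volume numbers are made up for the example;
 * real callers take them from the request.
 */
static enum drbd_ret_code __maybe_unused example_create_volume(struct drbd_tconn *tconn)
{
	enum drbd_ret_code ret = conn_new_minor(tconn, /* minor */ 0, /* vnr */ 0);

	if (ret != NO_ERROR)
		printk(KERN_ERR "drbd: conn_new_minor failed: %d\n", ret);
	return ret;
}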

/* counterpart of drbd_new_device.
 * last part of drbd_delete_device. */
void drbd_free_mdev(struct drbd_conf *mdev)
{
	kfree(mdev->current_epoch);
	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	kfree(mdev);
}

int __init drbd_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct p_connection_features) != 80);

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = 8;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
	       DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}

void drbd_free_resources(struct drbd_conf *mdev)
{
	crypto_free_hash(mdev->tconn->csums_tfm);
	mdev->tconn->csums_tfm = NULL;
	crypto_free_hash(mdev->tconn->verify_tfm);
	mdev->tconn->verify_tfm = NULL;
	crypto_free_hash(mdev->tconn->cram_hmac_tfm);
	mdev->tconn->cram_hmac_tfm = NULL;
	crypto_free_hash(mdev->tconn->integrity_w_tfm);
	mdev->tconn->integrity_w_tfm = NULL;
	crypto_free_hash(mdev->tconn->integrity_r_tfm);
	mdev->tconn->integrity_r_tfm = NULL;

	drbd_free_sock(mdev->tconn);

	__no_warn(local,
		  drbd_free_bc(mdev->ldev);
		  mdev->ldev = NULL;);
}

/* meta data management */

struct meta_data_on_disk {
	u64 la_size;		/* last agreed size. */
	u64 uuid[UI_SIZE];	/* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;		/* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;		/* offset to this block */
	u32 al_nr_extents;	/* important for restoring the AL */
		/* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;		/* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;	/* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;	/* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;
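
/* Note on the layout above: all multi-byte fields are stored big-endian
 * (cpu_to_be32()/cpu_to_be64() on write, be*_to_cpu() on read), and the
 * superblock is written as a single 512 byte sector; drbd_md_sync()
 * below zeroes the buffer first, so the reserved fields read back as 0. */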

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev: DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);

	buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, true);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);
}

/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev: DRBD device.
 * @bdev: Device from which the meta data should be read in.
 *
 * Return NO_ERROR on success, and an enum drbd_ret_code in case
 * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
		dev_err(DEV, "Error while reading metadata, magic not found.\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
			be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
			be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->dc.al_extents = be32_to_cpu(buffer->al_nr_extents);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

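	/* Sanity clamp; 7 is assumed to be the minimum and 127 the default
	 * number of activity log extents (see drbd_limits.h). */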
	if (bdev->dc.al_extents < 7)
		bdev->dc.al_extents = 127;

 err:
	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev: DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block.  This function sets MD_DIRTY, and starts a
 * timer that ensures drbd_md_sync() gets called within five seconds.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
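
/*
 * Usage sketch (illustration only; mirrors drbd_md_set_flag() further
 * down): change the in-core meta data, mark it dirty, and let the timer
 * above, or an explicit drbd_md_sync(), get it to stable storage.
 */
static void __maybe_unused example_md_change(struct drbd_conf *mdev) __must_hold(local)
{
	mdev->ldev->md.flags |= MDF_FULL_SYNC;	/* any in-core md change */
	drbd_md_mark_dirty(mdev);		/* arms md_sync_timer */
}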

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}

void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	_drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev: DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot.  Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	drbd_md_mark_dirty(mdev);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev: DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev: DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_conf *mdev = w->mdev;
	int rv = -EIO;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	if (get_ldev(mdev)) {
		drbd_bm_lock(mdev, work->why, work->flags);
		rv = work->io_fn(mdev);
		drbd_bm_unlock(mdev);
		put_ldev(mdev);
	}

	clear_bit_unlock(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		  drbd_free_bc(mdev->ldev);
		  mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev: DRBD device.
 * @io_fn: IO callback to be called when bitmap IO is possible
 * @done: callback to be called after the bitmap IO was performed
 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags (enum bm_flag)
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() cannot be called.  This function MAY ONLY be
 * called from worker context.  It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev: DRBD device.
 * @io_fn: IO callback to be called when bitmap IO is possible
 * @why: Descriptive text of the reason for doing the IO
 * @flags: Bitmap locking flags (enum bm_flag)
 *
 * Freezes application IO while the actual IO operation runs.  This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		   char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}
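
/*
 * Usage sketch (illustration only): request a full sync from non-worker
 * context by pairing one of the io_fn helpers above with drbd_bitmap_io().
 * The "why" string and flag value mirror what the attach path uses.
 */
static int __maybe_unused example_full_sync(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			      "set_n_write from example", BM_LOCKED_MASK);
}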

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}
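
/* Timer callbacks run in atomic context and must not block; writing the
 * meta data can.  So the timer only queues w_md_sync() (below) to the
 * worker, which then performs the actual drbd_md_sync(). */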
static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 0;
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]		= "Data",
		[P_DATA_REPLY]		= "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]		= "Barrier",
		[P_BITMAP]		= "ReportBitMap",
		[P_BECOME_SYNC_TARGET]	= "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]	= "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]	= "RSDataRequest",
		[P_SYNC_PARAM]		= "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]		= "ReportProtocol",
		[P_UUIDS]		= "ReportUUIDs",
		[P_SIZES]		= "ReportSizes",
		[P_STATE]		= "ReportState",
		[P_SYNC_UUID]		= "ReportSyncUUID",
		[P_AUTH_CHALLENGE]	= "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]		= "PingAck",
		[P_RECV_ACK]		= "RecvAck",
		[P_WRITE_ACK]		= "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_DISCARD_WRITE]	= "DiscardWrite",
		[P_NEG_ACK]		= "NegAck",
		[P_NEG_DREPLY]		= "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]		= "BarrierAck",
		[P_STATE_CHG_REQ]	= "StateChgRequest",
		[P_STATE_CHG_REPLY]	= "StateChgReply",
		[P_OV_REQUEST]		= "OVRequest",
		[P_OV_REPLY]		= "OVReply",
		[P_OV_RESULT]		= "OVResult",
		[P_CSUM_RS_REQUEST]	= "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]	= "CBitmap",
		[P_DELAY_PROBE]		= "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RETRY_WRITE]		= "RetryWrite",
	};

	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}

/**
 * drbd_wait_misc - wait for a request to make progress
 * @mdev: device associated with the request
 * @i: the struct drbd_interval embedded in struct drbd_request or
 *     struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *net_conf = mdev->tconn->net_conf;
	DEFINE_WAIT(wait);
	long timeout;

	if (!net_conf)
		return -ETIMEDOUT;
	timeout = MAX_SCHEDULE_TIMEOUT;
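	/* net_conf->timeout is configured in units of 0.1 seconds, hence
	 * the * HZ / 10; wait up to ko_count such intervals. */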
	if (net_conf->ko_count)
		timeout = net_conf->timeout * HZ / 10 * net_conf->ko_count;

	/* Indicate to wake up mdev->misc_wait on progress.  */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type)
{
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
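
/*
 * Usage sketch (illustration only): call sites gate real I/O on the
 * injector, typically via a drbd_insert_fault() wrapper (assumed to live
 * in drbd_int.h); the helper below is made up for the example.
 */
static bool __maybe_unused example_fail_md_write(struct drbd_conf *mdev)
{
	return _drbd_insert_fault(mdev, DRBD_FAULT_MD_WR) != 0;
}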
#endif

const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	   git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);