/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

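/* For example (illustrative only): loaded as a module,
 * "modprobe drbd minor_count=8"; built into the kernel, the equivalent
 * boot parameter is "drbd.minor_count=8". */
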
#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

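/* A sketch of how these are typically driven at run time (illustrative;
 * module_param() exposes them under /sys/module/drbd/parameters/):
 *
 *	echo 2  > /sys/module/drbd/parameters/enable_faults
 *	echo 10 > /sys/module/drbd/parameters/fault_rate
 */
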
/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;	/* list of struct drbd_tconn */
DEFINE_MUTEX(drbd_cfg_mutex);

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

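/* A minimal sketch of how such a pool is popped (illustrative only;
 * the real page-chain code lives in drbd_receiver.c):
 *
 *	spin_lock(&drbd_pp_lock);
 *	page = drbd_pp_pool;
 *	if (page) {
 *		drbd_pp_pool = (struct page *)page_private(page);
 *		set_page_private(page, 0);
 *		drbd_pp_vacant--;
 *	}
 *	spin_unlock(&drbd_pp_lock);
 */
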
static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

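/* Callers simply ask for a single-vec bio and need not care whether the
 * dedicated bioset has been created yet, e.g. (illustrative):
 *
 *	bio = bio_alloc_drbd(GFP_NOIO);
 */
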
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

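/* Usage sketch (illustrative): every successful get_ldev_if_state()
 * must be balanced by put_ldev(), which drops local_cnt again:
 *
 *	if (get_ldev_if_state(mdev, D_INCONSISTENT)) {
 *		... access mdev->ldev ...
 *		put_ldev(mdev);
 *	}
 */
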
/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
 * attached.
 */
static int tl_init(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);

	return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn:	DRBD connection.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = tconn->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
	}
}

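/* Note on "(newest_before->br_number+1) ?: 1" above (recap): this is the
 * GCC "a ?: b" extension; when the 32-bit barrier number wraps around,
 * the successor of 0xffffffff would be the reserved value 0, so 1 is
 * handed out instead. */
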
/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);

	b = tconn->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the lists head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	   */
	list_del_init(&b->requests);
	mdev = b->w.mdev;

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(tconn, b);
		if (nob)
			tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}

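/* Illustrative flow (assumed, matching the checks above): after sending an
 * epoch of N writes, the sender emits P_BARRIER with number B; the peer
 * answers with P_BARRIER_ACK carrying barrier_nr=B and set_size=N, which
 * ends up here. Any mismatch means the peers disagree about the epoch
 * contents, and the connection is dropped as a protocol error. */
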
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == RESEND) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(b->w.mdev);
					set_bit(CREATE_BARRIER, &b->w.mdev->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
				if (tmp != NULL)
					conn_err(tconn, "ASSERT FAILED tmp == NULL");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	struct list_head *le, *tle;
	struct drbd_request *r;
	int vnr;

	spin_lock_irq(&tconn->req_lock);

	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);
	}

	/* ensure bit indicating barrier is required is clear */
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		clear_bit(CREATE_BARRIER, &mdev->flags);

	spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}

void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

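/* The usual entry points are the wrappers in drbd_int.h (assumed here for
 * illustration): drbd_thread_stop() maps to _drbd_thread_stop(thi, false, true),
 * drbd_thread_stop_nowait() to (false, false), and
 * drbd_thread_restart_nowait() to (true, false). */
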
static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	int vnr = 0;
	struct drbd_conf *mdev;

	mdev = idr_get_next(&tconn->volumes, &vnr);
	if (!mdev)
		return -1;
	return mdev_to_minor(mdev);
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overridden by the user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

static void prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
}

static void prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
}

static void _prepare_header(struct drbd_tconn *tconn, int vnr, struct p_header *h,
			    enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 95)
		prepare_header95(&h->h95, cmd, size);
	else
		prepare_header80(&h->h80, cmd, size);
}

static void prepare_header(struct drbd_conf *mdev, struct p_header *h,
			   enum drbd_packet cmd, int size)
{
	_prepare_header(mdev->tconn, mdev->vnr, h, cmd, size);
}

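/* Wire-format recap of the two header layouts filled in above: the v80
 * header is be32 magic / be16 command / be16 length, while v95 shrinks
 * the magic to be16 and widens length to be32 - which is why
 * _prepare_header() switches on the agreed protocol version. */
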
/* the appropriate socket mutex must be held already */
int _conn_send_cmd(struct drbd_tconn *tconn, int vnr, struct drbd_socket *sock,
		   enum drbd_packet cmd, struct p_header *h, size_t size,
		   unsigned msg_flags)
{
	int err;

	_prepare_header(tconn, vnr, h, cmd, size - sizeof(struct p_header));
	err = drbd_send_all(tconn, sock->socket, h, size, msg_flags);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short send %s size=%d\n",
			  cmdname(cmd), (int)size);
	return err;
}

/* don't pass the socket. we may only look at it
 * when we hold the appropriate socket mutex.
 */
int conn_send_cmd(struct drbd_tconn *tconn, int vnr, struct drbd_socket *sock,
		  enum drbd_packet cmd, struct p_header *h, size_t size)
{
	int err = -EIO;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = _conn_send_cmd(tconn, vnr, sock, cmd, h, size, 0);
	mutex_unlock(&sock->mutex);
	return err;
}

int conn_send_cmd2(struct drbd_tconn *tconn, enum drbd_packet cmd, char *data,
		   size_t size)
{
	struct p_header80 h;
	int err;

	prepare_header80(&h, cmd, size);
	err = drbd_get_data_sock(tconn);
	if (!err) {
		err = drbd_send_all(tconn, tconn->data.socket, &h, sizeof(h), 0);
		if (!err)
			err = drbd_send_all(tconn, tconn->data.socket, data, size, 0);
		drbd_put_data_sock(tconn);
	}
	return err;
}

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct p_header h;
	return !conn_send_cmd(tconn, 0, &tconn->meta, P_PING, &h, sizeof(h));
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct p_header h;
	return !conn_send_cmd(tconn, 0, &tconn->meta, P_PING_ACK, &h, sizeof(h));
}

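/* These two form DRBD's keep-alive on the meta socket (sketch, assuming the
 * asender loop in drbd_receiver.c): the asender sends P_PING when the link
 * has been idle and expects the peer's P_PING_ACK within the configured
 * ping timeout, otherwise the connection is considered broken. */
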
int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct p_rs_param_95 *p;
	struct drbd_socket *sock;
	int size, err;
	const int apv = mdev->tconn->agreed_pro_version;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->tconn->net_conf->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	mutex_lock(&mdev->tconn->data.mutex);
	sock = &mdev->tconn->data;

	if (likely(sock->socket != NULL)) {
		enum drbd_packet cmd =
			apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

		p = mdev->tconn->data.sbuf;

		/* initialize verify_alg and csums_alg */
		memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

		if (get_ldev(mdev)) {
			p->rate = cpu_to_be32(mdev->ldev->dc.resync_rate);
			p->c_plan_ahead = cpu_to_be32(mdev->ldev->dc.c_plan_ahead);
			p->c_delay_target = cpu_to_be32(mdev->ldev->dc.c_delay_target);
			p->c_fill_target = cpu_to_be32(mdev->ldev->dc.c_fill_target);
			p->c_max_rate = cpu_to_be32(mdev->ldev->dc.c_max_rate);
			put_ldev(mdev);
		} else {
			p->rate = cpu_to_be32(DRBD_RATE_DEF);
			p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
			p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
			p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
			p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
		}

		if (apv >= 88)
			strcpy(p->verify_alg, mdev->tconn->net_conf->verify_alg);
		if (apv >= 89)
			strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);

		err = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
	} else
		err = -EIO;

	mutex_unlock(&mdev->tconn->data.mutex);

	return err;
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	struct p_protocol *p;
	int size, cf, err;

	size = sizeof(struct p_protocol);

	if (tconn->agreed_pro_version >= 87)
		size += strlen(tconn->net_conf->integrity_alg) + 1;

	/* we must not recurse into our own queue,
	 * as that is blocked during handshake */
	p = kmalloc(size, GFP_NOIO);
	if (p == NULL)
		return -ENOMEM;

	p->protocol      = cpu_to_be32(tconn->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(tconn->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(tconn->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(tconn->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);

	cf = 0;
	if (tconn->net_conf->want_lose)
		cf |= CF_WANT_LOSE;
	if (tconn->net_conf->dry_run) {
		if (tconn->agreed_pro_version >= 92)
			cf |= CF_DRY_RUN;
		else {
			conn_err(tconn, "--dry-run is not supported by peer");
			kfree(p);
			return -EOPNOTSUPP;
		}
	}
	p->conn_flags = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);

	err = conn_send_cmd2(tconn, P_PROTOCOL, p->head.payload, size - sizeof(struct p_header));
	kfree(p);
	return err;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct p_uuids p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);

	return drbd_send_cmd(mdev, &mdev->tconn->data, P_UUIDS, &p.head, sizeof(p));
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct p_rs_uuid p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);
	p.uuid = cpu_to_be64(uuid);

	drbd_send_cmd(mdev, &mdev->tconn->data, P_SYNC_UUID, &p.head, sizeof(p));
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct p_sizes p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	p.d_size = cpu_to_be64(d_size);
	p.u_size = cpu_to_be64(u_size);
	p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p.max_bio_size = cpu_to_be32(max_bio_size);
	p.queue_order_type = cpu_to_be16(q_order_type);
	p.dds_flags = cpu_to_be16(flags);

	return drbd_send_cmd(mdev, &mdev->tconn->data, P_SIZES, &p.head, sizeof(p));
}

/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state p;
	int err = -EIO;

	mutex_lock(&mdev->tconn->data.mutex);

	p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	sock = &mdev->tconn->data;

	if (likely(sock->socket != NULL))
		err = _drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);

	mutex_unlock(&mdev->tconn->data.mutex);

	return err;
}

int _conn_send_state_req(struct drbd_tconn *tconn, int vnr, enum drbd_packet cmd,
			 union drbd_state mask, union drbd_state val)
{
	struct p_req_state p;

	p.mask = cpu_to_be32(mask.i);
	p.val  = cpu_to_be32(val.i);

	return conn_send_cmd(tconn, vnr, &tconn->data, cmd, &p.head, sizeof(p));
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct p_req_state_reply p;

	p.retcode = cpu_to_be32(retcode);

	drbd_send_cmd(mdev, &mdev->tconn->meta, P_STATE_CHG_REPLY, &p.head, sizeof(p));
}

int conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct p_req_state_reply p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	p.retcode = cpu_to_be32(retcode);

	return !conn_send_cmd(tconn, 0, &tconn->meta, cmd, &p.head, sizeof(p));
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}

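/* Layout of the p_compressed_bm encoding byte, as set up by the three
 * helpers above (recap): bits 0-3 carry the drbd_bitmap_code, bits 4-6
 * the number of pad bits in the last byte of the code string, and bit 7
 * whether the very first run consists of set bits. */
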
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits;

	/* may we use this feature? */
	if ((mdev->tconn->net_conf->use_rle == 0) ||
	    (mdev->tconn->agreed_pro_version < 90))
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
	memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}

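/* Worked example (illustrative): a bitmap region whose first 3 bits are
 * clear, followed by 5 set bits and then 2 clear bits, encodes as
 * start=0 (first run is of clear bits) plus the VLI-coded run lengths
 * 3, 5, 2, ...; the code string is only used if it ends up shorter than
 * the plain bits it covers. */
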
/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct p_compressed_bm *p = mdev->tconn->data.sbuf;
	unsigned long num_words;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p, c);

	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = _drbd_send_cmd(mdev, &mdev->tconn->data,
				     P_COMPRESSED_BITMAP, &p->head,
				     sizeof(*p) + len, 0);

		c->packets[0]++;
		c->bytes[0] += sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		struct p_header *h = mdev->tconn->data.sbuf;
		num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
		len = num_words * sizeof(long);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words,
					(unsigned long *)h->payload);
		err = _drbd_send_cmd(mdev, &mdev->tconn->data, P_BITMAP,
				     h, sizeof(struct p_header80) + len, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += sizeof(struct p_header80) + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

1199/* See the comment at receive_bitmap() */
Andreas Gruenbacher058820c2011-03-22 16:03:43 +01001200static int _drbd_send_bitmap(struct drbd_conf *mdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001201{
1202 struct bm_xfer_ctx c;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001203 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001204
Andreas Gruenbacher841ce242010-12-15 19:31:20 +01001205 if (!expect(mdev->bitmap))
1206 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001207
Philipp Reisnerb411b362009-09-25 16:07:19 -07001208 if (get_ldev(mdev)) {
1209 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1210 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
1211 drbd_bm_set_all(mdev);
1212 if (drbd_bm_write(mdev)) {
 1213			/* writing the bitmap failed! Leave the full sync flag set in the
 1214			 * meta data, but otherwise proceed as normal - we need to tell the
 1215			 * other side that a full resync is required! */
1216 dev_err(DEV, "Failed to write bitmap to disk!\n");
1217 } else {
1218 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
1219 drbd_md_sync(mdev);
1220 }
1221 }
1222 put_ldev(mdev);
1223 }
1224
1225 c = (struct bm_xfer_ctx) {
1226 .bm_bits = drbd_bm_bits(mdev),
1227 .bm_words = drbd_bm_words(mdev),
1228 };
1229
1230 do {
Andreas Gruenbacher79ed9bd2011-03-24 21:31:38 +01001231 err = send_bitmap_rle_or_plain(mdev, &c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001232 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001233
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001234 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001235}
1236
1237int drbd_send_bitmap(struct drbd_conf *mdev)
1238{
1239 int err;
1240
Andreas Gruenbacher11b0be22011-03-15 16:15:10 +01001241 if (drbd_get_data_sock(mdev->tconn))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001242 return -1;
1243 err = !_drbd_send_bitmap(mdev);
Philipp Reisner61120872011-02-08 09:50:54 +01001244 drbd_put_data_sock(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001245 return err;
1246}
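
/* The get/put bracket in drbd_send_bitmap() above is the general pattern
 * for exclusive use of the data socket (a condensed sketch, no new API
 * assumed):
 *
 *	if (drbd_get_data_sock(tconn))	// serializes senders on the data socket
 *		return -1;
 *	err = ...send one or more packets...;
 *	drbd_put_data_sock(tconn);	// releases it again
 */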
Andreas Gruenbacherd4e67d72011-03-16 01:25:28 +01001247void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001248{
Philipp Reisnerb411b362009-09-25 16:07:19 -07001249 struct p_barrier_ack p;
1250
1251 p.barrier = barrier_nr;
1252 p.set_size = cpu_to_be32(set_size);
1253
Andreas Gruenbacherd4e67d72011-03-16 01:25:28 +01001254 if (mdev->state.conn >= C_CONNECTED)
1255 drbd_send_cmd(mdev, &mdev->tconn->meta, P_BARRIER_ACK, &p.head, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001256}
1257
1258/**
1259 * _drbd_send_ack() - Sends an ack packet
1260 * @mdev: DRBD device.
1261 * @cmd: Packet command code.
1262 * @sector: sector, needs to be in big endian byte order
1263 * @blksize: size in byte, needs to be in big endian byte order
1264 * @block_id: Id, big endian byte order
1265 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001266static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1267 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001268{
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269 struct p_block_ack p;
1270
1271 p.sector = sector;
1272 p.block_id = block_id;
1273 p.blksize = blksize;
Andreas Gruenbacher8ccf2182011-02-24 11:35:43 +01001274 p.seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001275
Philipp Reisnere42325a2011-01-19 13:55:45 +01001276 if (!mdev->tconn->meta.socket || mdev->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001277 return -EIO;
1278 return drbd_send_cmd(mdev, &mdev->tconn->meta, cmd, &p.head, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001279}
1280
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001281/* dp->sector and dp->block_id already/still in network byte order,
1282 * data_size is payload size according to dp->head,
1283 * and may need to be corrected for digest size. */
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001284void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1285 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001286{
Philipp Reisnera0638452011-01-19 14:31:32 +01001287 data_size -= (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1288 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001289 _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1290 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001291}
1292
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001293void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1294 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001295{
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001296 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001297}
1298
1299/**
1300 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001301 * @mdev: DRBD device
1302 * @cmd: packet command code
1303 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001304 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001305int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001306 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001307{
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001308 return _drbd_send_ack(mdev, cmd,
1309 cpu_to_be64(peer_req->i.sector),
1310 cpu_to_be32(peer_req->i.size),
1311 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001312}
1313
1314/* This function misuses the block_id field to signal if the blocks
 1315 * are in sync or not. */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001316int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001317 sector_t sector, int blksize, u64 block_id)
1318{
Andreas Gruenbacherfa79abd2011-03-16 01:31:39 +01001319 return _drbd_send_ack(mdev, cmd,
1320 cpu_to_be64(sector),
1321 cpu_to_be32(blksize),
1322 cpu_to_be64(block_id));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001323}
1324
1325int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1326 sector_t sector, int size, u64 block_id)
1327{
Philipp Reisnerb411b362009-09-25 16:07:19 -07001328 struct p_block_req p;
1329
1330 p.sector = cpu_to_be64(sector);
1331 p.block_id = block_id;
1332 p.blksize = cpu_to_be32(size);
1333
Andreas Gruenbacher6c1005e2011-03-16 01:34:24 +01001334 return drbd_send_cmd(mdev, &mdev->tconn->data, cmd, &p.head, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001335}
1336
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001337int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1338 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001339{
Andreas Gruenbacherdb1b0b72011-03-16 01:37:21 +01001340 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001341 struct p_block_req p;
1342
Philipp Reisnerfd340c12011-01-19 16:57:39 +01001343 prepare_header(mdev, &p.head, cmd, sizeof(p) - sizeof(struct p_header) + digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001344 p.sector = cpu_to_be64(sector);
Andreas Gruenbacher9a8e7752011-01-11 14:04:09 +01001345 p.block_id = ID_SYNCER /* unused */;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001346 p.blksize = cpu_to_be32(size);
1347
Philipp Reisnere42325a2011-01-19 13:55:45 +01001348 mutex_lock(&mdev->tconn->data.mutex);
Andreas Gruenbacherdb1b0b72011-03-16 01:37:21 +01001349 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), 0);
1350 if (!err)
1351 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, digest, digest_size, 0);
Philipp Reisnere42325a2011-01-19 13:55:45 +01001352 mutex_unlock(&mdev->tconn->data.mutex);
Andreas Gruenbacherdb1b0b72011-03-16 01:37:21 +01001353 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001354}
1355
1356int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1357{
Philipp Reisnerb411b362009-09-25 16:07:19 -07001358 struct p_block_req p;
1359
1360 p.sector = cpu_to_be64(sector);
Andreas Gruenbacher9a8e7752011-01-11 14:04:09 +01001361 p.block_id = ID_SYNCER /* unused */;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001362 p.blksize = cpu_to_be32(size);
1363
Andreas Gruenbacher5b9f4992011-03-16 01:31:39 +01001364 return drbd_send_cmd(mdev, &mdev->tconn->data, P_OV_REQUEST, &p.head, sizeof(p));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001365}
1366
1367/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001368 * returns false if we should retry,
 1369 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001370 */
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001371static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001372{
1373 int drop_it;
1374 /* long elapsed = (long)(jiffies - mdev->last_received); */
1375
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001376 drop_it = tconn->meta.socket == sock
1377 || !tconn->asender.task
1378 || get_t_state(&tconn->asender) != RUNNING
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001379 || tconn->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001380
1381 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001382 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001383
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001384 drop_it = !--tconn->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001385 if (!drop_it) {
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001386 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1387 current->comm, current->pid, tconn->ko_count);
1388 request_ping(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001389 }
1390
 1391	return drop_it; /* && (mdev->state == R_PRIMARY) */
1392}
1393
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001394static void drbd_update_congested(struct drbd_tconn *tconn)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001395{
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001396 struct sock *sk = tconn->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001397 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001398 set_bit(NET_CONGESTED, &tconn->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001399}
1400
Philipp Reisnerb411b362009-09-25 16:07:19 -07001401/* The idea of sendpage seems to be to put some kind of reference
1402 * to the page into the skb, and to hand it over to the NIC. In
1403 * this process get_page() gets called.
1404 *
1405 * As soon as the page was really sent over the network put_page()
1406 * gets called by some part of the network layer. [ NIC driver? ]
1407 *
1408 * [ get_page() / put_page() increment/decrement the count. If count
1409 * reaches 0 the page will be freed. ]
1410 *
1411 * This works nicely with pages from FSs.
1412 * But this means that in protocol A we might signal IO completion too early!
1413 *
1414 * In order not to corrupt data during a resync we must make sure
 1415 * that we do not reuse our own buffer pages (EEs) too early; therefore
1416 * we have the net_ee list.
1417 *
 1418 * XFS still seems to have problems: it submits pages with page_count == 0!
1419 * As a workaround, we disable sendpage on pages
1420 * with page_count == 0 or PageSlab.
1421 */
1422static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001423 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001424{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001425 struct socket *socket;
1426 void *addr;
1427 int err;
1428
1429 socket = mdev->tconn->data.socket;
1430 addr = kmap(page) + offset;
1431 err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001432 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001433 if (!err)
1434 mdev->send_cnt += size >> 9;
1435 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001436}
1437
1438static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001439 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001440{
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001441 struct socket *socket = mdev->tconn->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001442 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001443 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001444 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001445
1446 /* e.g. XFS meta- & log-data is in slab pages, which have a
1447 * page_count of 0 and/or have PageSlab() set.
1448 * we cannot use send_page for those, as that does get_page();
1449 * put_page(); and would cause either a VM_BUG directly, or
1450 * __page_cache_release a page that would actually still be referenced
1451 * by someone, leading to some obscure delayed Oops somewhere else. */
1452 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001453 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001454
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001455 msg_flags |= MSG_NOSIGNAL;
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001456 drbd_update_congested(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001457 set_fs(KERNEL_DS);
1458 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001459 int sent;
1460
1461 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001462 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001463 if (sent == -EAGAIN) {
1464 if (we_should_drop_the_connection(mdev->tconn, socket))
1465 break;
1466 continue;
1467 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001468 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1469 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001470 if (sent < 0)
1471 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001472 break;
1473 }
1474 len -= sent;
1475 offset += sent;
1476 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
1477 set_fs(oldfs);
Philipp Reisner01a311a2011-02-07 14:30:33 +01001478 clear_bit(NET_CONGESTED, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001479
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001480 if (len == 0) {
1481 err = 0;
1482 mdev->send_cnt += size >> 9;
1483 }
1484 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001485}
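
/* A minimal sketch contrasting the two send paths above: the copying path
 * is mandatory for pages the network layer must not take references on
 * (see the page_count()/PageSlab() discussion further up); everything
 * else may go zero-copy through ->sendpage().
 *
 *	if (disable_sendpage || page_count(page) < 1 || PageSlab(page))
 *		err = _drbd_no_send_page(mdev, page, off, len, 0); // kmap+copy
 *	else
 *		err = _drbd_send_page(mdev, page, off, len, 0);    // zero-copy
 */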
1486
1487static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1488{
1489 struct bio_vec *bvec;
1490 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001491 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001492 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001493 int err;
1494
1495 err = _drbd_no_send_page(mdev, bvec->bv_page,
1496 bvec->bv_offset, bvec->bv_len,
1497 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1498 if (err)
1499 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001500 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001501 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001502}
1503
1504static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1505{
1506 struct bio_vec *bvec;
1507 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001508 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001509 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001510 int err;
1511
1512 err = _drbd_send_page(mdev, bvec->bv_page,
1513 bvec->bv_offset, bvec->bv_len,
1514 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1515 if (err)
1516 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001517 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001518 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001519}
1520
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001521static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1522 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001523{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001524 struct page *page = peer_req->pages;
1525 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001526 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001527
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001528 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001529 page_chain_for_each(page) {
1530 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001531
1532 err = _drbd_send_page(mdev, page, 0, l,
1533 page_chain_next(page) ? MSG_MORE : 0);
1534 if (err)
1535 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001536 len -= l;
1537 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001538 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001539}
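
/* Hedged example of the MSG_MORE hinting used by the three helpers above:
 * every page but the last is sent with MSG_MORE so the TCP stack may
 * coalesce the pieces into fewer segments. For a bio with three bvecs
 * this amounts to (names as in _drbd_send_bio):
 *
 *	_drbd_no_send_page(mdev, pg0, off0, len0, MSG_MORE);
 *	_drbd_no_send_page(mdev, pg1, off1, len1, MSG_MORE);
 *	_drbd_no_send_page(mdev, pg2, off2, len2, 0);	// last piece, flush
 */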
1540
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001541static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1542{
Philipp Reisner31890f42011-01-19 14:12:51 +01001543 if (mdev->tconn->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001544 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001545 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1546 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1547 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1548 else
Jens Axboe721a9602011-03-09 11:56:30 +01001549 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001550}
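
/* Worked example for bio_flags_to_wire() (illustrative): a bio submitted
 * with REQ_SYNC | REQ_FUA maps to
 *
 *	DP_RW_SYNC | DP_FUA	// agreed_pro_version >= 95
 *	DP_RW_SYNC		// older peers: only the sync hint survives
 */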
1551
Philipp Reisnerb411b362009-09-25 16:07:19 -07001552/* Used to send write requests
1553 * R_PRIMARY -> Peer (P_DATA)
1554 */
1555int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1556{
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001557 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001558 struct p_data p;
1559 unsigned int dp_flags = 0;
1560 void *dgb;
1561 int dgs;
1562
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001563 err = drbd_get_data_sock(mdev->tconn);
1564 if (err)
1565 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001566
Philipp Reisnera0638452011-01-19 14:31:32 +01001567 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1568 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001569
Philipp Reisnerfd340c12011-01-19 16:57:39 +01001570 prepare_header(mdev, &p.head, P_DATA, sizeof(p) - sizeof(struct p_header) + dgs + req->i.size);
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001571 p.sector = cpu_to_be64(req->i.sector);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001572 p.block_id = (unsigned long)req;
Andreas Gruenbacher8ccf2182011-02-24 11:35:43 +01001573 p.seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001574
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001575 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
1576
Philipp Reisnerb411b362009-09-25 16:07:19 -07001577 if (mdev->state.conn >= C_SYNC_SOURCE &&
1578 mdev->state.conn <= C_PAUSED_SYNC_T)
1579 dp_flags |= DP_MAY_SET_IN_SYNC;
1580
1581 p.dp_flags = cpu_to_be32(dp_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001582 set_bit(UNPLUG_REMOTE, &mdev->flags);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001583 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, &p,
1584 sizeof(p), dgs ? MSG_MORE : 0);
1585 if (!err && dgs) {
Philipp Reisnera0638452011-01-19 14:31:32 +01001586 dgb = mdev->tconn->int_dig_out;
1587 drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, dgb);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001588 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, dgb, dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001589 }
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001590 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001591 /* For protocol A, we have to memcpy the payload into
1592 * socket buffers, as we may complete right away
1593 * as soon as we handed it over to tcp, at which point the data
1594 * pages may become invalid.
1595 *
1596 * For data-integrity enabled, we copy it as well, so we can be
1597 * sure that even if the bio pages may still be modified, it
1598 * won't change the data on the wire, thus if the digest checks
1599 * out ok after sending on this side, but does not fit on the
1600 * receiving side, we sure have detected corruption elsewhere.
1601 */
Philipp Reisner89e58e72011-01-19 13:12:45 +01001602 if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs)
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001603 err = _drbd_send_bio(mdev, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001604 else
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001605 err = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001606
1607 /* double check digest, sometimes buffers have been modified in flight. */
1608 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001609 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001610 * currently supported in kernel crypto. */
1611 unsigned char digest[64];
Philipp Reisnera0638452011-01-19 14:31:32 +01001612 drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, digest);
1613 if (memcmp(mdev->tconn->int_dig_out, digest, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001614 dev_warn(DEV,
1615 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001616 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001617 }
1618 } /* else if (dgs > 64) {
1619 ... Be noisy about digest too large ...
1620 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001621 }
1622
Philipp Reisner61120872011-02-08 09:50:54 +01001623 drbd_put_data_sock(mdev->tconn);
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001624
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001625 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001626}
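
/* Condensed sketch of the send-path choice made in drbd_send_dblock()
 * above: with protocol A the request may complete towards upper layers as
 * soon as the data sits in the socket buffer, so the pages could be
 * modified or reused while TCP still references them; hence the copying
 * path. The same holds when a digest was computed over the payload.
 *
 *	if (wire_protocol == DRBD_PROT_A || dgs)
 *		err = _drbd_send_bio(mdev, bio);	// copies the payload
 *	else
 *		err = _drbd_send_zc_bio(mdev, bio);	// zero-copy
 */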
1627
1628/* answer packet, used to send data back for read requests:
1629 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1630 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1631 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001632int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001633 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001634{
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001635 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001636 struct p_data p;
1637 void *dgb;
1638 int dgs;
1639
Philipp Reisnera0638452011-01-19 14:31:32 +01001640 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1641 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001642
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001643 prepare_header(mdev, &p.head, cmd, sizeof(p) -
1644 sizeof(struct p_header80) +
1645 dgs + peer_req->i.size);
1646 p.sector = cpu_to_be64(peer_req->i.sector);
1647 p.block_id = peer_req->block_id;
Andreas Gruenbachercc378272011-01-26 18:01:50 +01001648 p.seq_num = 0; /* unused */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001649
1650 /* Only called by our kernel thread.
1651 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
1652 * in response to admin command or module unload.
 1653 * in response to an admin command or module unload.
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001654 err = drbd_get_data_sock(mdev->tconn);
1655 if (err)
1656 return err;
1657 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, &p,
1658 sizeof(p), dgs ? MSG_MORE : 0);
1659 if (!err && dgs) {
Philipp Reisnera0638452011-01-19 14:31:32 +01001660 dgb = mdev->tconn->int_dig_out;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001661 drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, dgb);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001662 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, dgb,
1663 dgs, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001664 }
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001665 if (!err)
1666 err = _drbd_send_zc_ee(mdev, peer_req);
Philipp Reisner61120872011-02-08 09:50:54 +01001667 drbd_put_data_sock(mdev->tconn);
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001668
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001669 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001670}
1671
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01001672int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001673{
1674 struct p_block_desc p;
1675
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001676 p.sector = cpu_to_be64(req->i.sector);
1677 p.blksize = cpu_to_be32(req->i.size);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001678
Andreas Gruenbacher73218a3c2011-03-16 01:39:44 +01001679 return drbd_send_cmd(mdev, &mdev->tconn->data, P_OUT_OF_SYNC, &p.head, sizeof(p));
Philipp Reisner73a01a12010-10-27 14:33:00 +02001680}
1681
Philipp Reisnerb411b362009-09-25 16:07:19 -07001682/*
1683 drbd_send distinguishes two cases:
1684
1685 Packets sent via the data socket "sock"
1686 and packets sent via the meta data socket "msock"
1687
1688 sock msock
1689 -----------------+-------------------------+------------------------------
1690 timeout conf.timeout / 2 conf.timeout / 2
1691 timeout action send a ping via msock Abort communication
1692 and close all sockets
1693*/
1694
1695/*
1696 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1697 */
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001698int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001699 void *buf, size_t size, unsigned msg_flags)
1700{
1701 struct kvec iov;
1702 struct msghdr msg;
1703 int rv, sent = 0;
1704
1705 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001706 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001707
1708 /* THINK if (signal_pending) return ... ? */
1709
1710 iov.iov_base = buf;
1711 iov.iov_len = size;
1712
1713 msg.msg_name = NULL;
1714 msg.msg_namelen = 0;
1715 msg.msg_control = NULL;
1716 msg.msg_controllen = 0;
1717 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1718
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001719 if (sock == tconn->data.socket) {
1720 tconn->ko_count = tconn->net_conf->ko_count;
1721 drbd_update_congested(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001722 }
1723 do {
1724 /* STRANGE
1725 * tcp_sendmsg does _not_ use its size parameter at all ?
 1726 * tcp_sendmsg does _not_ use its size parameter at all?
1727 * -EAGAIN on timeout, -EINTR on signal.
1728 */
1729/* THINK
1730 * do we need to block DRBD_SIG if sock == &meta.socket ??
1731 * otherwise wake_asender() might interrupt some send_*Ack !
1732 */
1733 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1734 if (rv == -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001735 if (we_should_drop_the_connection(tconn, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001736 break;
1737 else
1738 continue;
1739 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001740 if (rv == -EINTR) {
1741 flush_signals(current);
1742 rv = 0;
1743 }
1744 if (rv < 0)
1745 break;
1746 sent += rv;
1747 iov.iov_base += rv;
1748 iov.iov_len -= rv;
1749 } while (sent < size);
1750
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001751 if (sock == tconn->data.socket)
1752 clear_bit(NET_CONGESTED, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001753
1754 if (rv <= 0) {
1755 if (rv != -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001756 conn_err(tconn, "%s_sendmsg returned %d\n",
1757 sock == tconn->meta.socket ? "msock" : "sock",
1758 rv);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001759 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001760 } else
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001761 conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001762 }
1763
1764 return sent;
1765}
1766
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001767/**
1768 * drbd_send_all - Send an entire buffer
1769 *
1770 * Returns 0 upon success and a negative error value otherwise.
1771 */
1772int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1773 size_t size, unsigned msg_flags)
1774{
1775 int err;
1776
1777 err = drbd_send(tconn, sock, buffer, size, msg_flags);
1778 if (err < 0)
1779 return err;
1780 if (err != size)
1781 return -EIO;
1782 return 0;
1783}
1784
Philipp Reisnerb411b362009-09-25 16:07:19 -07001785static int drbd_open(struct block_device *bdev, fmode_t mode)
1786{
1787 struct drbd_conf *mdev = bdev->bd_disk->private_data;
1788 unsigned long flags;
1789 int rv = 0;
1790
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001791 mutex_lock(&drbd_main_mutex);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001792 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001793 /* to have a stable mdev->state.role
1794 * and no race with updating open_cnt */
1795
1796 if (mdev->state.role != R_PRIMARY) {
1797 if (mode & FMODE_WRITE)
1798 rv = -EROFS;
1799 else if (!allow_oos)
1800 rv = -EMEDIUMTYPE;
1801 }
1802
1803 if (!rv)
1804 mdev->open_cnt++;
Philipp Reisner87eeee42011-01-19 14:16:30 +01001805 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001806 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001807
1808 return rv;
1809}
1810
1811static int drbd_release(struct gendisk *gd, fmode_t mode)
1812{
1813 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001814 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001815 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001816 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001817 return 0;
1818}
1819
Philipp Reisnerb411b362009-09-25 16:07:19 -07001820static void drbd_set_defaults(struct drbd_conf *mdev)
1821{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001822 /* Beware! The actual layout differs
1823 * between big endian and little endian */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001824 mdev->state = (union drbd_state) {
1825 { .role = R_SECONDARY,
1826 .peer = R_UNKNOWN,
1827 .conn = C_STANDALONE,
1828 .disk = D_DISKLESS,
1829 .pdsk = D_UNKNOWN,
Philipp Reisnerfb22c402010-09-08 23:20:21 +02001830 .susp = 0,
1831 .susp_nod = 0,
1832 .susp_fen = 0
Philipp Reisnerb411b362009-09-25 16:07:19 -07001833 } };
1834}
1835
1836void drbd_init_set_defaults(struct drbd_conf *mdev)
1837{
1838 /* the memset(,0,) did most of this.
1839 * note: only assignments, no allocation in here */
1840
1841 drbd_set_defaults(mdev);
1842
Philipp Reisnerb411b362009-09-25 16:07:19 -07001843 atomic_set(&mdev->ap_bio_cnt, 0);
1844 atomic_set(&mdev->ap_pending_cnt, 0);
1845 atomic_set(&mdev->rs_pending_cnt, 0);
1846 atomic_set(&mdev->unacked_cnt, 0);
1847 atomic_set(&mdev->local_cnt, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02001848 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02001849 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001850 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02001851 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001852
1853 mutex_init(&mdev->md_io_mutex);
Philipp Reisner8410da8f02011-02-11 20:11:10 +01001854 mutex_init(&mdev->own_state_mutex);
1855 mdev->state_mutex = &mdev->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001856
Philipp Reisnerb411b362009-09-25 16:07:19 -07001857 spin_lock_init(&mdev->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001858 spin_lock_init(&mdev->peer_seq_lock);
1859 spin_lock_init(&mdev->epoch_lock);
1860
1861 INIT_LIST_HEAD(&mdev->active_ee);
1862 INIT_LIST_HEAD(&mdev->sync_ee);
1863 INIT_LIST_HEAD(&mdev->done_ee);
1864 INIT_LIST_HEAD(&mdev->read_ee);
1865 INIT_LIST_HEAD(&mdev->net_ee);
1866 INIT_LIST_HEAD(&mdev->resync_reads);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001867 INIT_LIST_HEAD(&mdev->resync_work.list);
1868 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001869 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001870 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02001871 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001872 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02001873
Philipp Reisner794abb72010-12-27 11:51:23 +01001874 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001875 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001876 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001877 mdev->md_sync_work.cb = w_md_sync;
1878 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001879 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001880
1881 mdev->resync_work.mdev = mdev;
1882 mdev->unplug_work.mdev = mdev;
1883 mdev->go_diskless.mdev = mdev;
1884 mdev->md_sync_work.mdev = mdev;
1885 mdev->bm_io_work.w.mdev = mdev;
1886 mdev->start_resync_work.mdev = mdev;
1887
Philipp Reisnerb411b362009-09-25 16:07:19 -07001888 init_timer(&mdev->resync_timer);
1889 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01001890 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001891 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001892 mdev->resync_timer.function = resync_timer_fn;
1893 mdev->resync_timer.data = (unsigned long) mdev;
1894 mdev->md_sync_timer.function = md_sync_timer_fn;
1895 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001896 mdev->start_resync_timer.function = start_resync_timer_fn;
1897 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001898 mdev->request_timer.function = request_timer_fn;
1899 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001900
1901 init_waitqueue_head(&mdev->misc_wait);
1902 init_waitqueue_head(&mdev->state_wait);
1903 init_waitqueue_head(&mdev->ee_wait);
1904 init_waitqueue_head(&mdev->al_wait);
1905 init_waitqueue_head(&mdev->seq_wait);
1906
Philipp Reisnerfd340c12011-01-19 16:57:39 +01001907 /* mdev->tconn->agreed_pro_version gets initialized in drbd_connect() */
Philipp Reisner2451fc32010-08-24 13:43:11 +02001908 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001909 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001910 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1911 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001912}
1913
1914void drbd_mdev_cleanup(struct drbd_conf *mdev)
1915{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001916 int i;
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001917 if (mdev->tconn->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001918 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001919 mdev->tconn->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001920
1921 /* no need to lock it, I'm the only thread alive */
1922 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
1923 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
1924 mdev->al_writ_cnt =
1925 mdev->bm_writ_cnt =
1926 mdev->read_cnt =
1927 mdev->recv_cnt =
1928 mdev->send_cnt =
1929 mdev->writ_cnt =
1930 mdev->p_size =
1931 mdev->rs_start =
1932 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001933 mdev->rs_failed = 0;
1934 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001935 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001936 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1937 mdev->rs_mark_left[i] = 0;
1938 mdev->rs_mark_time[i] = 0;
1939 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01001940 D_ASSERT(mdev->tconn->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001941
1942 drbd_set_my_capacity(mdev, 0);
1943 if (mdev->bitmap) {
1944 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01001945 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001946 drbd_bm_cleanup(mdev);
1947 }
1948
1949 drbd_free_resources(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02001950 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001951
1952 /*
1953 * currently we drbd_init_ee only on module load, so
1954 * we may do drbd_release_ee only on module unload!
1955 */
1956 D_ASSERT(list_empty(&mdev->active_ee));
1957 D_ASSERT(list_empty(&mdev->sync_ee));
1958 D_ASSERT(list_empty(&mdev->done_ee));
1959 D_ASSERT(list_empty(&mdev->read_ee));
1960 D_ASSERT(list_empty(&mdev->net_ee));
1961 D_ASSERT(list_empty(&mdev->resync_reads));
Philipp Reisnere42325a2011-01-19 13:55:45 +01001962 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
1963 D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001964 D_ASSERT(list_empty(&mdev->resync_work.list));
1965 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001966 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01001967
1968 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001969}
1970
1971
1972static void drbd_destroy_mempools(void)
1973{
1974 struct page *page;
1975
1976 while (drbd_pp_pool) {
1977 page = drbd_pp_pool;
1978 drbd_pp_pool = (struct page *)page_private(page);
1979 __free_page(page);
1980 drbd_pp_vacant--;
1981 }
1982
1983 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
1984
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01001985 if (drbd_md_io_bio_set)
1986 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg35abf592011-02-23 12:39:46 +01001987 if (drbd_md_io_page_pool)
1988 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001989 if (drbd_ee_mempool)
1990 mempool_destroy(drbd_ee_mempool);
1991 if (drbd_request_mempool)
1992 mempool_destroy(drbd_request_mempool);
1993 if (drbd_ee_cache)
1994 kmem_cache_destroy(drbd_ee_cache);
1995 if (drbd_request_cache)
1996 kmem_cache_destroy(drbd_request_cache);
1997 if (drbd_bm_ext_cache)
1998 kmem_cache_destroy(drbd_bm_ext_cache);
1999 if (drbd_al_ext_cache)
2000 kmem_cache_destroy(drbd_al_ext_cache);
2001
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002002 drbd_md_io_bio_set = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002003 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002004 drbd_ee_mempool = NULL;
2005 drbd_request_mempool = NULL;
2006 drbd_ee_cache = NULL;
2007 drbd_request_cache = NULL;
2008 drbd_bm_ext_cache = NULL;
2009 drbd_al_ext_cache = NULL;
2010
2011 return;
2012}
2013
2014static int drbd_create_mempools(void)
2015{
2016 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002017 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002018 int i;
2019
2020 /* prepare our caches and mempools */
2021 drbd_request_mempool = NULL;
2022 drbd_ee_cache = NULL;
2023 drbd_request_cache = NULL;
2024 drbd_bm_ext_cache = NULL;
2025 drbd_al_ext_cache = NULL;
2026 drbd_pp_pool = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002027 drbd_md_io_page_pool = NULL;
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002028 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002029
2030 /* caches */
2031 drbd_request_cache = kmem_cache_create(
2032 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2033 if (drbd_request_cache == NULL)
2034 goto Enomem;
2035
2036 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002037 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038 if (drbd_ee_cache == NULL)
2039 goto Enomem;
2040
2041 drbd_bm_ext_cache = kmem_cache_create(
2042 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2043 if (drbd_bm_ext_cache == NULL)
2044 goto Enomem;
2045
2046 drbd_al_ext_cache = kmem_cache_create(
2047 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2048 if (drbd_al_ext_cache == NULL)
2049 goto Enomem;
2050
2051 /* mempools */
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002052 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2053 if (drbd_md_io_bio_set == NULL)
2054 goto Enomem;
2055
Lars Ellenberg35abf592011-02-23 12:39:46 +01002056 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2057 if (drbd_md_io_page_pool == NULL)
2058 goto Enomem;
2059
Philipp Reisnerb411b362009-09-25 16:07:19 -07002060 drbd_request_mempool = mempool_create(number,
2061 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2062 if (drbd_request_mempool == NULL)
2063 goto Enomem;
2064
2065 drbd_ee_mempool = mempool_create(number,
2066 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002067 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002068 goto Enomem;
2069
2070 /* drbd's page pool */
2071 spin_lock_init(&drbd_pp_lock);
2072
2073 for (i = 0; i < number; i++) {
2074 page = alloc_page(GFP_HIGHUSER);
2075 if (!page)
2076 goto Enomem;
2077 set_page_private(page, (unsigned long)drbd_pp_pool);
2078 drbd_pp_pool = page;
2079 }
2080 drbd_pp_vacant = number;
2081
2082 return 0;
2083
2084Enomem:
2085 drbd_destroy_mempools(); /* in case we allocated some */
2086 return -ENOMEM;
2087}
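
/* Sketch (not driver code) of how the drbd_pp_pool free list built above
 * is consumed: pages are threaded through their page_private field, so a
 * pop under drbd_pp_lock is simply:
 *
 *	spin_lock(&drbd_pp_lock);
 *	page = drbd_pp_pool;
 *	if (page) {
 *		drbd_pp_pool = (struct page *)page_private(page);
 *		drbd_pp_vacant--;
 *	}
 *	spin_unlock(&drbd_pp_lock);
 */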
2088
2089static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2090 void *unused)
2091{
2092 /* just so we have it. you never know what interesting things we
2093 * might want to do here some day...
2094 */
2095
2096 return NOTIFY_DONE;
2097}
2098
2099static struct notifier_block drbd_notifier = {
2100 .notifier_call = drbd_notify_sys,
2101};
2102
2103static void drbd_release_ee_lists(struct drbd_conf *mdev)
2104{
2105 int rr;
2106
2107 rr = drbd_release_ee(mdev, &mdev->active_ee);
2108 if (rr)
2109 dev_err(DEV, "%d EEs in active list found!\n", rr);
2110
2111 rr = drbd_release_ee(mdev, &mdev->sync_ee);
2112 if (rr)
2113 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2114
2115 rr = drbd_release_ee(mdev, &mdev->read_ee);
2116 if (rr)
2117 dev_err(DEV, "%d EEs in read list found!\n", rr);
2118
2119 rr = drbd_release_ee(mdev, &mdev->done_ee);
2120 if (rr)
2121 dev_err(DEV, "%d EEs in done list found!\n", rr);
2122
2123 rr = drbd_release_ee(mdev, &mdev->net_ee);
2124 if (rr)
2125 dev_err(DEV, "%d EEs in net list found!\n", rr);
2126}
2127
Philipp Reisner774b3052011-02-22 02:07:03 -05002128/* caution. no locking. */
2129void drbd_delete_device(unsigned int minor)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002130{
2131 struct drbd_conf *mdev = minor_to_mdev(minor);
2132
2133 if (!mdev)
2134 return;
2135
Lars Ellenberg569083c2011-03-07 09:49:02 +01002136 idr_remove(&mdev->tconn->volumes, mdev->vnr);
2137 idr_remove(&minors, minor);
2138 synchronize_rcu();
Philipp Reisner774b3052011-02-22 02:07:03 -05002139
Philipp Reisnerb411b362009-09-25 16:07:19 -07002140 /* paranoia asserts */
Andreas Gruenbacher70dc65e2010-12-21 14:46:57 +01002141 D_ASSERT(mdev->open_cnt == 0);
Philipp Reisnere42325a2011-01-19 13:55:45 +01002142 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002143 /* end paranoia asserts */
2144
2145 del_gendisk(mdev->vdisk);
2146
2147 /* cleanup stuff that may have been allocated during
2148 * device (re-)configuration or state changes */
2149
2150 if (mdev->this_bdev)
2151 bdput(mdev->this_bdev);
2152
2153 drbd_free_resources(mdev);
2154
2155 drbd_release_ee_lists(mdev);
2156
Philipp Reisnerb411b362009-09-25 16:07:19 -07002157 lc_destroy(mdev->act_log);
2158 lc_destroy(mdev->resync);
2159
2160 kfree(mdev->p_uuid);
2161 /* mdev->p_uuid = NULL; */
2162
Philipp Reisnerb411b362009-09-25 16:07:19 -07002163 /* cleanup the rest that has been
2164 * allocated from drbd_new_device
2165 * and actually free the mdev itself */
2166 drbd_free_mdev(mdev);
2167}
2168
2169static void drbd_cleanup(void)
2170{
2171 unsigned int i;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002172 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002173
2174 unregister_reboot_notifier(&drbd_notifier);
2175
Lars Ellenberg17a93f32010-11-24 10:37:35 +01002176 /* first remove proc,
 2177	 * drbdsetup uses its presence to detect
 2178	 * whether DRBD is loaded.
 2179	 * If we got stuck in proc removal,
2180 * but have netlink already deregistered,
2181 * some drbdsetup commands may wait forever
2182 * for an answer.
2183 */
2184 if (drbd_proc)
2185 remove_proc_entry("drbd", NULL);
2186
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002187 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002188
Philipp Reisner81a5d602011-02-22 19:53:16 -05002189 idr_for_each_entry(&minors, mdev, i)
2190 drbd_delete_device(i);
2191 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002192 unregister_blkdev(DRBD_MAJOR, "drbd");
2193
Philipp Reisner81a5d602011-02-22 19:53:16 -05002194 idr_destroy(&minors);
2195
Philipp Reisnerb411b362009-09-25 16:07:19 -07002196 printk(KERN_INFO "drbd: module cleanup done.\n");
2197}
2198
2199/**
2200 * drbd_congested() - Callback for pdflush
2201 * @congested_data: User data
2202 * @bdi_bits: Bits pdflush is currently interested in
2203 *
2204 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2205 */
2206static int drbd_congested(void *congested_data, int bdi_bits)
2207{
2208 struct drbd_conf *mdev = congested_data;
2209 struct request_queue *q;
2210 char reason = '-';
2211 int r = 0;
2212
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01002213 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002214 /* DRBD has frozen IO */
2215 r = bdi_bits;
2216 reason = 'd';
2217 goto out;
2218 }
2219
2220 if (get_ldev(mdev)) {
2221 q = bdev_get_queue(mdev->ldev->backing_bdev);
2222 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2223 put_ldev(mdev);
2224 if (r)
2225 reason = 'b';
2226 }
2227
Philipp Reisner01a311a2011-02-07 14:30:33 +01002228 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002229 r |= (1 << BDI_async_congested);
2230 reason = reason == 'b' ? 'a' : 'n';
2231 }
2232
2233out:
2234 mdev->congestion_reason = reason;
2235 return r;
2236}
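
/* Example of how drbd_congested() composes its answer (illustrative):
 * frozen local IO reports every queried bit back (reason 'd'); otherwise
 * the backing device's congestion state is passed through ('b'), and
 * network congestion additionally raises the async bit:
 *
 *	r |= (1 << BDI_async_congested);
 *	reason = reason == 'b' ? 'a' : 'n';	// both, or network only
 */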
2237
Philipp Reisner6699b652011-02-09 11:10:24 +01002238static void drbd_init_workqueue(struct drbd_work_queue* wq)
2239{
2240 sema_init(&wq->s, 0);
2241 spin_lock_init(&wq->q_lock);
2242 INIT_LIST_HEAD(&wq->q);
2243}
2244
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002245struct drbd_tconn *conn_by_name(const char *name)
2246{
2247 struct drbd_tconn *tconn;
2248
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002249 if (!name || !name[0])
2250 return NULL;
2251
Lars Ellenberg543cc102011-03-10 22:18:18 +01002252 mutex_lock(&drbd_cfg_mutex);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002253 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2254 if (!strcmp(tconn->name, name))
2255 goto found;
2256 }
2257 tconn = NULL;
2258found:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002259 mutex_unlock(&drbd_cfg_mutex);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002260 return tconn;
2261}
2262
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002263static int drbd_alloc_socket(struct drbd_socket *socket)
2264{
2265 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2266 if (!socket->rbuf)
2267 return -ENOMEM;
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002268 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2269 if (!socket->sbuf)
2270 return -ENOMEM;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002271 return 0;
2272}
2273
2274static void drbd_free_socket(struct drbd_socket *socket)
2275{
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002276 free_page((unsigned long) socket->sbuf);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002277 free_page((unsigned long) socket->rbuf);
2278}
2279
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002280struct drbd_tconn *drbd_new_tconn(const char *name)
Philipp Reisner21114382011-01-19 12:26:59 +01002281{
2282 struct drbd_tconn *tconn;
2283
2284 tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
2285 if (!tconn)
2286 return NULL;
2287
2288 tconn->name = kstrdup(name, GFP_KERNEL);
2289 if (!tconn->name)
2290 goto fail;
2291
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002292 if (drbd_alloc_socket(&tconn->data))
2293 goto fail;
2294 if (drbd_alloc_socket(&tconn->meta))
2295 goto fail;
2296
Philipp Reisner774b3052011-02-22 02:07:03 -05002297 if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
2298 goto fail;
2299
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002300 if (!tl_init(tconn))
2301 goto fail;
2302
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01002303 tconn->cstate = C_STANDALONE;
Philipp Reisner8410da8f02011-02-11 20:11:10 +01002304 mutex_init(&tconn->cstate_mutex);
Philipp Reisner6699b652011-02-09 11:10:24 +01002305 spin_lock_init(&tconn->req_lock);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002306 atomic_set(&tconn->net_cnt, 0);
2307 init_waitqueue_head(&tconn->net_cnt_wait);
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01002308 init_waitqueue_head(&tconn->ping_wait);
Philipp Reisner062e8792011-02-08 11:09:18 +01002309 idr_init(&tconn->volumes);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002310
Philipp Reisner6699b652011-02-09 11:10:24 +01002311 drbd_init_workqueue(&tconn->data.work);
2312 mutex_init(&tconn->data.mutex);
2313
2314 drbd_init_workqueue(&tconn->meta.work);
2315 mutex_init(&tconn->meta.mutex);
2316
Philipp Reisner392c8802011-02-09 10:33:31 +01002317 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
2318 drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
2319 drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
2320
Lars Ellenbergf3990022011-03-23 14:31:09 +01002321 tconn->res_opts = (struct res_opts) {
2322 {}, 0, /* cpu_mask */
2323 DRBD_ON_NO_DATA_DEF, /* on_no_data */
2324 };
2325
Lars Ellenberg543cc102011-03-10 22:18:18 +01002326 mutex_lock(&drbd_cfg_mutex);
2327 list_add_tail(&tconn->all_tconn, &drbd_tconns);
2328 mutex_unlock(&drbd_cfg_mutex);
Philipp Reisner21114382011-01-19 12:26:59 +01002329
2330 return tconn;
2331
2332fail:
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002333 tl_cleanup(tconn);
Philipp Reisner774b3052011-02-22 02:07:03 -05002334 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002335 drbd_free_socket(&tconn->meta);
2336 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002337 kfree(tconn->name);
2338 kfree(tconn);
2339
2340 return NULL;
2341}
2342
2343void drbd_free_tconn(struct drbd_tconn *tconn)
2344{
Philipp Reisner21114382011-01-19 12:26:59 +01002345 list_del(&tconn->all_tconn);
Philipp Reisner062e8792011-02-08 11:09:18 +01002346 idr_destroy(&tconn->volumes);
Philipp Reisner21114382011-01-19 12:26:59 +01002347
Philipp Reisner774b3052011-02-22 02:07:03 -05002348 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002349 drbd_free_socket(&tconn->meta);
2350 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002351 kfree(tconn->name);
Philipp Reisnerb42a70a2011-01-27 10:55:20 +01002352 kfree(tconn->int_dig_out);
2353 kfree(tconn->int_dig_in);
2354 kfree(tconn->int_dig_vv);
Philipp Reisner21114382011-01-19 12:26:59 +01002355 kfree(tconn);
2356}
2357
Philipp Reisner774b3052011-02-22 02:07:03 -05002358enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002359{
2360 struct drbd_conf *mdev;
2361 struct gendisk *disk;
2362 struct request_queue *q;
Philipp Reisner774b3052011-02-22 02:07:03 -05002363 int vnr_got = vnr;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002364 int minor_got = minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002365 enum drbd_ret_code err = ERR_NOMEM;
Philipp Reisner774b3052011-02-22 02:07:03 -05002366
2367 mdev = minor_to_mdev(minor);
2368 if (mdev)
2369 return ERR_MINOR_EXISTS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002370
2371 /* GFP_KERNEL, we are outside of all write-out paths */
2372 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2373 if (!mdev)
Philipp Reisner774b3052011-02-22 02:07:03 -05002374 return ERR_NOMEM;
2375
2376 mdev->tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002377 mdev->minor = minor;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002378 mdev->vnr = vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002379
2380 drbd_init_set_defaults(mdev);
2381
2382 q = blk_alloc_queue(GFP_KERNEL);
2383 if (!q)
2384 goto out_no_q;
2385 mdev->rq_queue = q;
2386 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002387
2388 disk = alloc_disk(1);
2389 if (!disk)
2390 goto out_no_disk;
2391 mdev->vdisk = disk;
2392
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002393 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002394
2395 disk->queue = q;
2396 disk->major = DRBD_MAJOR;
2397 disk->first_minor = minor;
2398 disk->fops = &drbd_ops;
2399 sprintf(disk->disk_name, "drbd%d", minor);
2400 disk->private_data = mdev;
2401
2402 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2403 /* we have no partitions. we contain only ourselves. */
2404 mdev->this_bdev->bd_contains = mdev->this_bdev;
2405
2406 q->backing_dev_info.congested_fn = drbd_congested;
2407 q->backing_dev_info.congested_data = mdev;
2408
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01002409 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002410	/* Setting max_hw_sectors to the odd value of 8 KiB here;
 2411	 * this triggers a max_bio_size message upon first attach or connect */
2412 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002413 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2414 blk_queue_merge_bvec(q, drbd_merge_bvec);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002415 q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002416
	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	mdev->read_requests = RB_ROOT;
	mdev->write_requests = RB_ROOT;

	mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!mdev->current_epoch)
		goto out_no_epoch;

	INIT_LIST_HEAD(&mdev->current_epoch->list);
	mdev->epochs = 1;

	if (!idr_pre_get(&minors, GFP_KERNEL))
		goto out_no_minor_idr;
	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
		goto out_no_minor_idr;
	if (minor_got != minor) {
		err = ERR_MINOR_EXISTS;
		drbd_msg_put_info("requested minor exists already");
		goto out_idr_remove_minor;
	}

	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
		goto out_idr_remove_minor;
	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
		goto out_idr_remove_minor;
	if (vnr_got != vnr) {
		err = ERR_INVALID_REQUEST;
		drbd_msg_put_info("requested volume exists already");
		goto out_idr_remove_vol;
	}
	add_disk(disk);

	/* inherit the connection state */
	mdev->state.conn = tconn->cstate;
	if (mdev->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(vnr, mdev, tconn);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
	idr_remove(&minors, minor_got);
	synchronize_rcu();
out_no_minor_idr:
	kfree(mdev->current_epoch);
out_no_epoch:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kfree(mdev);
	return err;
}
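
/*
 * Illustrative only (not part of the original file): a sketch of how a
 * caller -- e.g. the netlink configuration path -- might use
 * conn_new_minor() and propagate its drbd_ret_code. "tconn", "minor" and
 * "vnr" are assumed to come from an already-validated configuration
 * request; cleaning up the connection on failure remains the caller's job.
 *
 *	enum drbd_ret_code ret = conn_new_minor(tconn, minor, vnr);
 *	if (ret != NO_ERROR)
 *		return ret;
 */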

/* counterpart of conn_new_minor;
 * last part of drbd_delete_device. */
void drbd_free_mdev(struct drbd_conf *mdev)
{
	kfree(mdev->current_epoch);
	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	kfree(mdev);
}


int __init drbd_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct p_header80) != sizeof(struct p_header95));
	BUILD_BUG_ON(sizeof(struct p_connection_features) != 80);

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = 8;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
	       DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}
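
/*
 * Usage note (an assumption based on the module parameters, not text from
 * this file): when DRBD is built as a module, the minor_count checked in
 * drbd_init() is supplied at load time, e.g.
 *
 *	modprobe drbd minor_count=8
 *
 * An out-of-range value fails the load with -EINVAL for the module build,
 * while a built-in driver silently falls back to 8.
 */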

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}


void drbd_free_resources(struct drbd_conf *mdev)
{
	crypto_free_hash(mdev->tconn->csums_tfm);
	mdev->tconn->csums_tfm = NULL;
	crypto_free_hash(mdev->tconn->verify_tfm);
	mdev->tconn->verify_tfm = NULL;
	crypto_free_hash(mdev->tconn->cram_hmac_tfm);
	mdev->tconn->cram_hmac_tfm = NULL;
	crypto_free_hash(mdev->tconn->integrity_w_tfm);
	mdev->tconn->integrity_w_tfm = NULL;
	crypto_free_hash(mdev->tconn->integrity_r_tfm);
	mdev->tconn->integrity_r_tfm = NULL;

	drbd_free_sock(mdev->tconn);

	__no_warn(local,
		  drbd_free_bc(mdev->ldev);
		  mdev->ldev = NULL;);
}

/* meta data management */

struct meta_data_on_disk {
	u64 la_size;		/* last agreed size. */
	u64 uuid[UI_SIZE];	/* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;		/* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;		/* offset to this block */
	u32 al_nr_extents;	/* important for restoring the AL */
			/* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;		/* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;	/* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size; /* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);

	buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, true);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);
}
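
/*
 * The get_ldev_if_state()/put_ldev() pairing seen above is the reference
 * pattern for touching mdev->ldev. A minimal sketch (hypothetical helper,
 * not in the original source):
 *
 *	static u64 example_device_uuid(struct drbd_conf *mdev)
 *	{
 *		u64 uuid = 0;
 *		if (get_ldev_if_state(mdev, D_FAILED)) {
 *			uuid = mdev->ldev->md.device_uuid;
 *			put_ldev(mdev);
 *		}
 *		return uuid;
 *	}
 */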

/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
		dev_err(DEV, "Error while reading metadata, magic not found.\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
			be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
			be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->dc.al_extents = be32_to_cpu(buffer->al_nr_extents);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (bdev->dc.al_extents < 7)
		bdev->dc.al_extents = 127;

 err:
	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY and starts a
 * timer that ensures drbd_md_sync() gets called within five seconds.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
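
/*
 * Sketch of the intended dirty/sync flow (the function names are real, the
 * call site is hypothetical): any change to meta data worth persisting is
 * followed by a mark-dirty; the timer armed there is only a safety net in
 * case the explicit drbd_md_sync() is forgotten.
 *
 *	drbd_md_set_flag(mdev, MDF_FULL_SYNC);	// marks dirty, arms timer
 *	...
 *	drbd_md_sync(mdev);	// writes the super block, clears MD_DIRTY
 */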

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}


void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	_drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}
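
/*
 * Worked example of the rotation above (slot names are real, the values
 * are invented for illustration):
 *
 *	before:  UI_CURRENT = 0xABCD....,  UI_BITMAP = 0
 *	after:   UI_CURRENT = <random>,    UI_BITMAP = 0xABCD....
 *
 * A peer that still reports 0xABCD.... as its current UUID then matches
 * our bitmap slot on the next connect, so only the blocks marked in the
 * bitmap since the rotation need to be resynced.
 */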

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	drbd_md_mark_dirty(mdev);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_conf *mdev = w->mdev;
	int rv = -EIO;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	if (get_ldev(mdev)) {
		drbd_bm_lock(mdev, work->why, work->flags);
		rv = work->io_fn(mdev);
		drbd_bm_unlock(mdev);
		put_ldev(mdev);
	}

	clear_bit_unlock(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		drbd_free_bc(mdev->ldev);
		mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}
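
/*
 * Illustrative caller (hypothetical, worker context), combining the
 * queueing API with one of the io_fn helpers defined earlier in this file:
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
 *			     "example: set_n_write", BM_LOCKED_MASK);
 *
 * The queued work is executed by w_bitmap_io() once ap_bio_cnt drains to
 * zero; passing done == NULL simply skips the completion callback.
 */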

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}
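
/*
 * Synchronous counterpart, sketched (hypothetical call site; must not run
 * on the worker): application IO is suspended around the locked io_fn
 * unless the flags allow concurrent bit-setting.
 *
 *	int rv = drbd_bitmap_io(mdev, &drbd_bm_write,
 *				"example bitmap write", BM_LOCKED_MASK);
 *	if (rv)
 *		dev_err(DEV, "example bitmap write failed\n");
 */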

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 0;
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA] = "Data",
		[P_DATA_REPLY] = "DataReply",
		[P_RS_DATA_REPLY] = "RSDataReply",
		[P_BARRIER] = "Barrier",
		[P_BITMAP] = "ReportBitMap",
		[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
		[P_UNPLUG_REMOTE] = "UnplugRemote",
		[P_DATA_REQUEST] = "DataRequest",
		[P_RS_DATA_REQUEST] = "RSDataRequest",
		[P_SYNC_PARAM] = "SyncParam",
		[P_SYNC_PARAM89] = "SyncParam89",
		[P_PROTOCOL] = "ReportProtocol",
		[P_UUIDS] = "ReportUUIDs",
		[P_SIZES] = "ReportSizes",
		[P_STATE] = "ReportState",
		[P_SYNC_UUID] = "ReportSyncUUID",
		[P_AUTH_CHALLENGE] = "AuthChallenge",
		[P_AUTH_RESPONSE] = "AuthResponse",
		[P_PING] = "Ping",
		[P_PING_ACK] = "PingAck",
		[P_RECV_ACK] = "RecvAck",
		[P_WRITE_ACK] = "WriteAck",
		[P_RS_WRITE_ACK] = "RSWriteAck",
		[P_DISCARD_WRITE] = "DiscardWrite",
		[P_NEG_ACK] = "NegAck",
		[P_NEG_DREPLY] = "NegDReply",
		[P_NEG_RS_DREPLY] = "NegRSDReply",
		[P_BARRIER_ACK] = "BarrierAck",
		[P_STATE_CHG_REQ] = "StateChgRequest",
		[P_STATE_CHG_REPLY] = "StateChgReply",
		[P_OV_REQUEST] = "OVRequest",
		[P_OV_REPLY] = "OVReply",
		[P_OV_RESULT] = "OVResult",
		[P_CSUM_RS_REQUEST] = "CsumRSRequest",
		[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP] = "CBitmap",
		[P_DELAY_PROBE] = "DelayProbe",
		[P_OUT_OF_SYNC] = "OutOfSync",
		[P_RETRY_WRITE] = "RetryWrite",
	};

	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
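
/*
 * For example, cmdname(P_PING) yields "Ping"; any value beyond the table
 * (other than the three handshake packets special-cased above) falls back
 * to "Unknown".
 */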

/**
 * drbd_wait_misc - wait for a request to make progress
 * @mdev:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *net_conf = mdev->tconn->net_conf;
	DEFINE_WAIT(wait);
	long timeout;

	if (!net_conf)
		return -ETIMEDOUT;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (net_conf->ko_count)
		timeout = net_conf->timeout * HZ / 10 * net_conf->ko_count;

	/* Indicate to wake up mdev->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
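
/*
 * Typical use, sketched (the surrounding loop and its predicate are
 * hypothetical; req_lock must already be held, since drbd_wait_misc()
 * drops and re-takes it around the schedule):
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	while (still_conflicting(i)) {		// hypothetical predicate
 *		int err = drbd_wait_misc(mdev, i);
 *		if (err)
 *			break;
 *	}
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 */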

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator. Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}
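
/*
 * In other words: state(n+1) = state(n) * 39916801 + 479001701
 * (mod 2^BITS_PER_LONG), reseeded from get_random_bytes() every
 * FAULT_RANDOM_REFRESH draws. swahw32() swaps the 16-bit half-words of
 * the low 32 bits, presumably so the statistically weak low-order bits
 * of the LCG do not end up as the low bits of the returned value.
 */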

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif

const char *drbd_buildtag(void)
{
	/* When DRBD is built from external sources, this holds a reference
	   to the git hash of that source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);