/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;	/* list of struct drbd_tconn */
DEFINE_MUTEX(drbd_cfg_mutex);

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
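
/* Sketch of the resulting pool discipline (illustrative only, assuming the
 * list is spliced through the page private field as described above):
 *
 *	// push a page onto the pool (drbd_pp_lock held)
 *	set_page_private(page, (unsigned long)drbd_pp_pool);
 *	drbd_pp_pool = page;
 *	drbd_pp_vacant++;
 *
 *	// pop a page off the pool (drbd_pp_lock held)
 *	page = drbd_pp_pool;
 *	drbd_pp_pool = (struct page *)page_private(page);
 *	drbd_pp_vacant--;
 */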

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points
 * to the tail of the list. There is always at least one &struct drbd_tl_epoch
 * object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 * attached.
 */
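/* For illustration only (not used anywhere in the driver): walking every
 * epoch and each request attached to it would look like this, assuming
 * tconn->req_lock is held:
 *
 *	struct drbd_tl_epoch *b;
 *	struct drbd_request *req;
 *
 *	for (b = tconn->oldest_tle; b != NULL; b = b->next)
 *		list_for_each_entry(req, &b->requests, tl_requests)
 *			;	// inspect req
 */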
static int tl_init(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);

	return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn: DRBD connection.
 * @new: Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = tconn->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn: DRBD connection.
 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
 * @set_size: Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch object this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);

	b = tconn->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the lists head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	   */
	list_del_init(&b->requests);
	mdev = b->w.mdev;

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(tconn, b);
		if (nob)
			tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn: DRBD connection.
 * @what: The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == RESEND) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(b->w.mdev);
					set_bit(CREATE_BARRIER, &b->w.mdev->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
				if (tmp != NULL)
					conn_err(tconn, "ASSERT FAILED tmp == NULL");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}


/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn: DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from
 * the receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	struct list_head *le, *tle;
	struct drbd_request *r;
	int vnr;

	spin_lock_irq(&tconn->req_lock);

	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);
	}

	/* ensure bit indicating barrier is required is clear */
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		clear_bit(CREATE_BARRIER, &mdev->flags);

	spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	int vnr = 0;
	struct drbd_conf *mdev;

	mdev = idr_get_next(&tconn->volumes, &vnr);
	if (!mdev)
		return -1;
	return mdev_to_minor(mdev);
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn: DRBD connection.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}
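/* Worked example (illustrative only): on a box with 4 online CPUs, a
 * connection whose lowest minor is 5 yields ord = 5 % 4 = 1, so the loop
 * above pins its worker, receiver and asender threads to the second
 * online CPU. */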

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi: drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	BUILD_BUG_ON(sizeof(struct p_header80) != sizeof(struct p_header95));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
	return sizeof(struct p_header80);
}

static void prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
}

static void prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
}
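/* On-the-wire layout implied by the two helpers above (all fields big
 * endian; both variants are 8 bytes, as drbd_header_size asserts):
 *
 *	header80: u32 magic (DRBD_MAGIC)     | u16 command | u16 length
 *	header95: u16 magic (DRBD_MAGIC_BIG) | u16 command | u32 length
 *
 * The wider length field is what agreed_pro_version >= 95 buys here. */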

static void prepare_header(struct drbd_tconn *tconn, int vnr, struct p_header *h,
			   enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 95)
		prepare_header95(&h->h95, cmd, size);
	else
		prepare_header80(&h->h80, cmd, size);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	mutex_lock(&sock->mutex);
	if (!sock->socket) {
		mutex_unlock(&sock->mutex);
		return NULL;
	}
	return sock->sbuf;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	prepare_header(tconn, vnr, sock->sbuf, cmd,
		       header_size - sizeof(struct p_header) + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(tconn, 0, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, sizeof(struct p_header), NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, sizeof(struct p_header), NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->tconn->net_conf->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		p->rate = cpu_to_be32(mdev->ldev->dc.resync_rate);
		p->c_plan_ahead = cpu_to_be32(mdev->ldev->dc.c_plan_ahead);
		p->c_delay_target = cpu_to_be32(mdev->ldev->dc.c_delay_target);
		p->c_fill_target = cpu_to_be32(mdev->ldev->dc.c_fill_target);
		p->c_max_rate = cpu_to_be32(mdev->ldev->dc.c_max_rate);
		put_ldev(mdev);
	} else {
		p->rate = cpu_to_be32(DRBD_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, mdev->tconn->net_conf->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	int size, cf;

	if (tconn->net_conf->dry_run && tconn->agreed_pro_version < 92) {
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(tconn->net_conf->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(tconn->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(tconn->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(tconn->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(tconn->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);
	cf = 0;
	if (tconn->net_conf->want_lose)
		cf |= CF_WANT_LOSE;
	if (tconn->net_conf->dry_run)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);
	return conn_send_command(tconn, sock, P_PROTOCOL, size, NULL, 0);
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
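/* The three helpers above pack everything into the single p->encoding byte;
 * reading their masks back out, the layout is:
 *
 *	bit  7    - whether the first run length counts set bits
 *	bits 6..4 - number of pad bits at the end of the bit stream
 *	bits 3..0 - bitmap code, e.g. RLE_VLI_Bits
 */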

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits;

	/* may we use this feature? */
	if ((mdev->tconn->net_conf->use_rle == 0) ||
	    (mdev->tconn->agreed_pro_version < 90))
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
	memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
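/* Worked example (illustrative only): if the chunk of bitmap being encoded
 * starts 00000000 11110000 ..., the loop above calls dcbp_set_start(p, 0)
 * (first bit clear), then VLI-encodes the run lengths 8, 4, 4, ...; the
 * toggling between clear and set runs is implicit, so only the lengths
 * travel over the wire. */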

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	struct p_compressed_bm *p = sock->sbuf;
	unsigned long num_words;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p, c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		struct p_header *h = sock->sbuf;
		num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
		len = num_words * sizeof(long);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words,
					(unsigned long *)h->payload);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP,
				     sizeof(*h) + len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += sizeof(struct p_header80) + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (mdev->state.conn < C_CONNECTED)
1310 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001311
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001312 sock = &mdev->tconn->meta;
1313 p = drbd_prepare_command(mdev, sock);
1314 if (!p)
1315 return;
1316 p->barrier = barrier_nr;
1317 p->set_size = cpu_to_be32(set_size);
1318 drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001319}
1320
1321/**
1322 * _drbd_send_ack() - Sends an ack packet
1323 * @mdev: DRBD device.
1324 * @cmd: Packet command code.
1325 * @sector: sector, needs to be in big endian byte order
 1326 * @blksize: size in bytes, needs to be in big endian byte order
1327 * @block_id: Id, big endian byte order
1328 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001329static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1330 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001331{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001332 struct drbd_socket *sock;
1333 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001334
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001335 if (mdev->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001336 return -EIO;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001337
1338 sock = &mdev->tconn->meta;
1339 p = drbd_prepare_command(mdev, sock);
1340 if (!p)
1341 return -EIO;
1342 p->sector = sector;
1343 p->block_id = block_id;
1344 p->blksize = blksize;
1345 p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1346 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001347}
1348
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001349/* dp->sector and dp->block_id already/still in network byte order,
1350 * data_size is payload size according to dp->head,
1351 * and may need to be corrected for digest size. */
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001352void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1353 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001354{
Philipp Reisnera0638452011-01-19 14:31:32 +01001355 data_size -= (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1356 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001357 _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1358 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001359}
1360
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001361void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1362 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001363{
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001364 _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001365}
1366
1367/**
1368 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001369 * @mdev: DRBD device
1370 * @cmd: packet command code
1371 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001372 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001373int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001374 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001375{
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001376 return _drbd_send_ack(mdev, cmd,
1377 cpu_to_be64(peer_req->i.sector),
1378 cpu_to_be32(peer_req->i.size),
1379 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001380}
1381
1382/* This function misuses the block_id field to signal if the blocks
 1383 * are in sync or not. */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001384int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001385 sector_t sector, int blksize, u64 block_id)
1386{
Andreas Gruenbacherfa79abd2011-03-16 01:31:39 +01001387 return _drbd_send_ack(mdev, cmd,
1388 cpu_to_be64(sector),
1389 cpu_to_be32(blksize),
1390 cpu_to_be64(block_id));
Philipp Reisnerb411b362009-09-25 16:07:19 -07001391}
1392
1393int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1394 sector_t sector, int size, u64 block_id)
1395{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001396 struct drbd_socket *sock;
1397 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001398
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001399 sock = &mdev->tconn->data;
1400 p = drbd_prepare_command(mdev, sock);
1401 if (!p)
1402 return -EIO;
1403 p->sector = cpu_to_be64(sector);
1404 p->block_id = block_id;
1405 p->blksize = cpu_to_be32(size);
1406 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001407}
1408
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001409int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1410 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001411{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001412 struct drbd_socket *sock;
1413 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001414
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001415 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001416
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001417 sock = &mdev->tconn->data;
1418 p = drbd_prepare_command(mdev, sock);
1419 if (!p)
1420 return -EIO;
1421 p->sector = cpu_to_be64(sector);
1422 p->block_id = ID_SYNCER /* unused */;
1423 p->blksize = cpu_to_be32(size);
1424 return drbd_send_command(mdev, sock, cmd, sizeof(*p),
1425 digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001426}
1427
1428int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1429{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001430 struct drbd_socket *sock;
1431 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001432
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001433 sock = &mdev->tconn->data;
1434 p = drbd_prepare_command(mdev, sock);
1435 if (!p)
1436 return -EIO;
1437 p->sector = cpu_to_be64(sector);
1438 p->block_id = ID_SYNCER /* unused */;
1439 p->blksize = cpu_to_be32(size);
1440 return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001441}
1442
1443/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001444 * returns false if we should retry,
1445 * true if we think connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001446 */
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001447static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001448{
1449 int drop_it;
1450 /* long elapsed = (long)(jiffies - mdev->last_received); */
1451
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001452 drop_it = tconn->meta.socket == sock
1453 || !tconn->asender.task
1454 || get_t_state(&tconn->asender) != RUNNING
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001455 || tconn->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001456
1457 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001458 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001459
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001460 drop_it = !--tconn->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001461 if (!drop_it) {
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001462 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1463 current->comm, current->pid, tconn->ko_count);
1464 request_ping(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001465 }
1466
 1467	return drop_it; /* && (mdev->state == R_PRIMARY) */
1468}
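/* Illustrative numbers (assumed, not taken from this file): with a
 * sndtimeo of, say, 3 seconds (half the configured net timeout, see
 * the sock/msock table further down) and ko_count = 7, a peer that
 * never drains our send queue is declared dead after roughly
 * 7 * 3s = 21s; every expired sendmsg decrements ko_count and asks
 * the asender for a ping until the count reaches zero. */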
1469
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001470static void drbd_update_congested(struct drbd_tconn *tconn)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001471{
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001472 struct sock *sk = tconn->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001473 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001474 set_bit(NET_CONGESTED, &tconn->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001475}
1476
Philipp Reisnerb411b362009-09-25 16:07:19 -07001477/* The idea of sendpage seems to be to put some kind of reference
1478 * to the page into the skb, and to hand it over to the NIC. In
1479 * this process get_page() gets called.
1480 *
1481 * As soon as the page was really sent over the network put_page()
1482 * gets called by some part of the network layer. [ NIC driver? ]
1483 *
1484 * [ get_page() / put_page() increment/decrement the count. If count
1485 * reaches 0 the page will be freed. ]
1486 *
1487 * This works nicely with pages from FSs.
1488 * But this means that in protocol A we might signal IO completion too early!
1489 *
1490 * In order not to corrupt data during a resync we must make sure
 1491 * that we do not reuse our own buffer pages (EEs) too early, therefore
1492 * we have the net_ee list.
1493 *
 1494 * XFS seems to have problems here: it still submits pages with page_count == 0!
1495 * As a workaround, we disable sendpage on pages
1496 * with page_count == 0 or PageSlab.
1497 */
1498static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001499 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001500{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001501 struct socket *socket;
1502 void *addr;
1503 int err;
1504
1505 socket = mdev->tconn->data.socket;
1506 addr = kmap(page) + offset;
1507 err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001508 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001509 if (!err)
1510 mdev->send_cnt += size >> 9;
1511 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512}
1513
1514static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001515 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001516{
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001517 struct socket *socket = mdev->tconn->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001518 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001519 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001520 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001521
1522 /* e.g. XFS meta- & log-data is in slab pages, which have a
1523 * page_count of 0 and/or have PageSlab() set.
1524 * we cannot use send_page for those, as that does get_page();
1525 * put_page(); and would cause either a VM_BUG directly, or
1526 * __page_cache_release a page that would actually still be referenced
1527 * by someone, leading to some obscure delayed Oops somewhere else. */
1528 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001529 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001531 msg_flags |= MSG_NOSIGNAL;
Philipp Reisner1a7ba642011-02-07 14:56:02 +01001532 drbd_update_congested(mdev->tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001533 set_fs(KERNEL_DS);
1534 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001535 int sent;
1536
1537 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001538 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001539 if (sent == -EAGAIN) {
1540 if (we_should_drop_the_connection(mdev->tconn, socket))
1541 break;
1542 continue;
1543 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001544 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1545 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001546 if (sent < 0)
1547 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001548 break;
1549 }
1550 len -= sent;
1551 offset += sent;
 1552	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED */);
1553 set_fs(oldfs);
Philipp Reisner01a311a2011-02-07 14:30:33 +01001554 clear_bit(NET_CONGESTED, &mdev->tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001555
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001556 if (len == 0) {
1557 err = 0;
1558 mdev->send_cnt += size >> 9;
1559 }
1560 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001561}
1562
1563static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1564{
1565 struct bio_vec *bvec;
1566 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001567 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001568 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001569 int err;
1570
1571 err = _drbd_no_send_page(mdev, bvec->bv_page,
1572 bvec->bv_offset, bvec->bv_len,
1573 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1574 if (err)
1575 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001576 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001577 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001578}
1579
1580static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1581{
1582 struct bio_vec *bvec;
1583 int i;
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001584 /* hint all but last page with MSG_MORE */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001585 __bio_for_each_segment(bvec, bio, i, 0) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001586 int err;
1587
1588 err = _drbd_send_page(mdev, bvec->bv_page,
1589 bvec->bv_offset, bvec->bv_len,
1590 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1591 if (err)
1592 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001593 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001594 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001595}
1596
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001597static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1598 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001599{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001600 struct page *page = peer_req->pages;
1601 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001602 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001603
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001604 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001605 page_chain_for_each(page) {
1606 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001607
1608 err = _drbd_send_page(mdev, page, 0, l,
1609 page_chain_next(page) ? MSG_MORE : 0);
1610 if (err)
1611 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001612 len -= l;
1613 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001614 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001615}
1616
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001617static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1618{
Philipp Reisner31890f42011-01-19 14:12:51 +01001619 if (mdev->tconn->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001620 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001621 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1622 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1623 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1624 else
Jens Axboe721a9602011-03-09 11:56:30 +01001625 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001626}
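/* Example (illustrative): a write bio carrying REQ_SYNC | REQ_FUA is
 * sent as DP_RW_SYNC | DP_FUA to peers with protocol version 95 or
 * newer; against an older peer only the DP_RW_SYNC hint survives,
 * and FUA/FLUSH/DISCARD semantics are dropped on the wire. */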
1627
Philipp Reisnerb411b362009-09-25 16:07:19 -07001628/* Used to send write requests
1629 * R_PRIMARY -> Peer (P_DATA)
1630 */
1631int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1632{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001633 struct drbd_socket *sock;
1634 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001635 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001636 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001637 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001638
Philipp Reisnera0638452011-01-19 14:31:32 +01001639 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1640 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001641
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001642 sock = &mdev->tconn->data;
1643 p = drbd_prepare_command(mdev, sock);
1644 if (!p)
1645 return -EIO;
1646 p->sector = cpu_to_be64(req->i.sector);
1647 p->block_id = (unsigned long)req;
1648 p->seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001649 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001650 if (mdev->state.conn >= C_SYNC_SOURCE &&
1651 mdev->state.conn <= C_PAUSED_SYNC_T)
1652 dp_flags |= DP_MAY_SET_IN_SYNC;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001653 p->dp_flags = cpu_to_be32(dp_flags);
1654 if (dgs)
1655 drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, p + 1);
1656 err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001657 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001658 /* For protocol A, we have to memcpy the payload into
1659 * socket buffers, as we may complete right away
1660 * as soon as we handed it over to tcp, at which point the data
1661 * pages may become invalid.
1662 *
1663 * For data-integrity enabled, we copy it as well, so we can be
1664 * sure that even if the bio pages may still be modified, it
1665 * won't change the data on the wire, thus if the digest checks
 1666	 * out ok after sending on this side, but does not match on the
 1667	 * receiving side, we have surely detected corruption elsewhere.
1668 */
Philipp Reisner89e58e72011-01-19 13:12:45 +01001669 if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs)
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001670 err = _drbd_send_bio(mdev, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001671 else
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001672 err = _drbd_send_zc_bio(mdev, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001673
1674 /* double check digest, sometimes buffers have been modified in flight. */
1675 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001676 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001677 * currently supported in kernel crypto. */
1678 unsigned char digest[64];
Philipp Reisnera0638452011-01-19 14:31:32 +01001679 drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001680 if (memcmp(p + 1, digest, dgs)) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001681 dev_warn(DEV,
1682 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001683 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001684 }
1685 } /* else if (dgs > 64) {
1686 ... Be noisy about digest too large ...
1687 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001688 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001689 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001690
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001691 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001692}
1693
1694/* answer packet, used to send data back for read requests:
1695 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1696 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1697 */
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001698int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001699 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001700{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001701 struct drbd_socket *sock;
1702 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001703 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001704 int dgs;
1705
Philipp Reisnera0638452011-01-19 14:31:32 +01001706 dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1707 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001708
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001709 sock = &mdev->tconn->data;
1710 p = drbd_prepare_command(mdev, sock);
1711 if (!p)
1712 return -EIO;
1713 p->sector = cpu_to_be64(peer_req->i.sector);
1714 p->block_id = peer_req->block_id;
1715 p->seq_num = 0; /* unused */
1716 if (dgs)
1717 drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, p + 1);
1718 err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001719 if (!err)
1720 err = _drbd_send_zc_ee(mdev, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001721 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerbd26bfc52010-05-04 12:33:58 +02001722
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001723 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001724}
1725
Andreas Gruenbacher8f7bed72010-12-19 23:53:14 +01001726int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001727{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001728 struct drbd_socket *sock;
1729 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001730
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001731 sock = &mdev->tconn->data;
1732 p = drbd_prepare_command(mdev, sock);
1733 if (!p)
1734 return -EIO;
1735 p->sector = cpu_to_be64(req->i.sector);
1736 p->blksize = cpu_to_be32(req->i.size);
1737 return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001738}
1739
Philipp Reisnerb411b362009-09-25 16:07:19 -07001740/*
1741 drbd_send distinguishes two cases:
1742
1743 Packets sent via the data socket "sock"
1744 and packets sent via the meta data socket "msock"
1745
1746 sock msock
1747 -----------------+-------------------------+------------------------------
1748 timeout conf.timeout / 2 conf.timeout / 2
1749 timeout action send a ping via msock Abort communication
1750 and close all sockets
1751*/
1752
1753/*
1754 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1755 */
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001756int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001757 void *buf, size_t size, unsigned msg_flags)
1758{
1759 struct kvec iov;
1760 struct msghdr msg;
1761 int rv, sent = 0;
1762
1763 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001764 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001765
1766 /* THINK if (signal_pending) return ... ? */
1767
1768 iov.iov_base = buf;
1769 iov.iov_len = size;
1770
1771 msg.msg_name = NULL;
1772 msg.msg_namelen = 0;
1773 msg.msg_control = NULL;
1774 msg.msg_controllen = 0;
1775 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1776
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001777 if (sock == tconn->data.socket) {
1778 tconn->ko_count = tconn->net_conf->ko_count;
1779 drbd_update_congested(tconn);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001780 }
1781 do {
1782 /* STRANGE
1783 * tcp_sendmsg does _not_ use its size parameter at all ?
1784 *
1785 * -EAGAIN on timeout, -EINTR on signal.
1786 */
1787/* THINK
1788 * do we need to block DRBD_SIG if sock == &meta.socket ??
1789 * otherwise wake_asender() might interrupt some send_*Ack !
1790 */
1791 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1792 if (rv == -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001793 if (we_should_drop_the_connection(tconn, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001794 break;
1795 else
1796 continue;
1797 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001798 if (rv == -EINTR) {
1799 flush_signals(current);
1800 rv = 0;
1801 }
1802 if (rv < 0)
1803 break;
1804 sent += rv;
1805 iov.iov_base += rv;
1806 iov.iov_len -= rv;
1807 } while (sent < size);
1808
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001809 if (sock == tconn->data.socket)
1810 clear_bit(NET_CONGESTED, &tconn->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001811
1812 if (rv <= 0) {
1813 if (rv != -EAGAIN) {
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001814 conn_err(tconn, "%s_sendmsg returned %d\n",
1815 sock == tconn->meta.socket ? "msock" : "sock",
1816 rv);
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001817 conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001818 } else
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01001819 conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001820 }
1821
1822 return sent;
1823}
1824
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001825/**
1826 * drbd_send_all - Send an entire buffer
1827 *
1828 * Returns 0 upon success and a negative error value otherwise.
1829 */
1830int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1831 size_t size, unsigned msg_flags)
1832{
1833 int err;
1834
1835 err = drbd_send(tconn, sock, buffer, size, msg_flags);
1836 if (err < 0)
1837 return err;
1838 if (err != size)
1839 return -EIO;
1840 return 0;
1841}
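/* Usage sketch (hypothetical caller, for illustration only):
 *
 *	err = drbd_send_all(tconn, tconn->data.socket, buf, size, 0);
 *	if (err)
 *		return err;	(a short send comes back as -EIO)
 *
 * callers never see partial sends; drbd_send()'s byte count is folded
 * into the 0-or-negative convention used by the send paths above. */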
1842
Philipp Reisnerb411b362009-09-25 16:07:19 -07001843static int drbd_open(struct block_device *bdev, fmode_t mode)
1844{
1845 struct drbd_conf *mdev = bdev->bd_disk->private_data;
1846 unsigned long flags;
1847 int rv = 0;
1848
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001849 mutex_lock(&drbd_main_mutex);
Philipp Reisner87eeee42011-01-19 14:16:30 +01001850 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001851 /* to have a stable mdev->state.role
1852 * and no race with updating open_cnt */
1853
1854 if (mdev->state.role != R_PRIMARY) {
1855 if (mode & FMODE_WRITE)
1856 rv = -EROFS;
1857 else if (!allow_oos)
1858 rv = -EMEDIUMTYPE;
1859 }
1860
1861 if (!rv)
1862 mdev->open_cnt++;
Philipp Reisner87eeee42011-01-19 14:16:30 +01001863 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001864 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001865
1866 return rv;
1867}
1868
1869static int drbd_release(struct gendisk *gd, fmode_t mode)
1870{
1871 struct drbd_conf *mdev = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001872 mutex_lock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001873 mdev->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001874 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001875 return 0;
1876}
1877
Philipp Reisnerb411b362009-09-25 16:07:19 -07001878static void drbd_set_defaults(struct drbd_conf *mdev)
1879{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001880 /* Beware! The actual layout differs
1881 * between big endian and little endian */
Philipp Reisnerda9fbc22011-03-29 10:52:01 +02001882 mdev->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001883 { .role = R_SECONDARY,
1884 .peer = R_UNKNOWN,
1885 .conn = C_STANDALONE,
1886 .disk = D_DISKLESS,
1887 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001888 } };
1889}
1890
1891void drbd_init_set_defaults(struct drbd_conf *mdev)
1892{
1893 /* the memset(,0,) did most of this.
1894 * note: only assignments, no allocation in here */
1895
1896 drbd_set_defaults(mdev);
1897
Philipp Reisnerb411b362009-09-25 16:07:19 -07001898 atomic_set(&mdev->ap_bio_cnt, 0);
1899 atomic_set(&mdev->ap_pending_cnt, 0);
1900 atomic_set(&mdev->rs_pending_cnt, 0);
1901 atomic_set(&mdev->unacked_cnt, 0);
1902 atomic_set(&mdev->local_cnt, 0);
Lars Ellenberg435f0742010-09-06 12:30:25 +02001903 atomic_set(&mdev->pp_in_use_by_net, 0);
Philipp Reisner778f2712010-07-06 11:14:00 +02001904 atomic_set(&mdev->rs_sect_in, 0);
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001905 atomic_set(&mdev->rs_sect_ev, 0);
Philipp Reisner759fbdf2010-10-26 16:02:27 +02001906 atomic_set(&mdev->ap_in_flight, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001907
1908 mutex_init(&mdev->md_io_mutex);
Philipp Reisner8410da8f02011-02-11 20:11:10 +01001909 mutex_init(&mdev->own_state_mutex);
1910 mdev->state_mutex = &mdev->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001911
Philipp Reisnerb411b362009-09-25 16:07:19 -07001912 spin_lock_init(&mdev->al_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001913 spin_lock_init(&mdev->peer_seq_lock);
1914 spin_lock_init(&mdev->epoch_lock);
1915
1916 INIT_LIST_HEAD(&mdev->active_ee);
1917 INIT_LIST_HEAD(&mdev->sync_ee);
1918 INIT_LIST_HEAD(&mdev->done_ee);
1919 INIT_LIST_HEAD(&mdev->read_ee);
1920 INIT_LIST_HEAD(&mdev->net_ee);
1921 INIT_LIST_HEAD(&mdev->resync_reads);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001922 INIT_LIST_HEAD(&mdev->resync_work.list);
1923 INIT_LIST_HEAD(&mdev->unplug_work.list);
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001924 INIT_LIST_HEAD(&mdev->go_diskless.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001925 INIT_LIST_HEAD(&mdev->md_sync_work.list);
Philipp Reisnerc4752ef2010-10-27 17:32:36 +02001926 INIT_LIST_HEAD(&mdev->start_resync_work.list);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001927 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02001928
Philipp Reisner794abb72010-12-27 11:51:23 +01001929 mdev->resync_work.cb = w_resync_timer;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001930 mdev->unplug_work.cb = w_send_write_hint;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02001931 mdev->go_diskless.cb = w_go_diskless;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001932 mdev->md_sync_work.cb = w_md_sync;
1933 mdev->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001934 mdev->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001935
1936 mdev->resync_work.mdev = mdev;
1937 mdev->unplug_work.mdev = mdev;
1938 mdev->go_diskless.mdev = mdev;
1939 mdev->md_sync_work.mdev = mdev;
1940 mdev->bm_io_work.w.mdev = mdev;
1941 mdev->start_resync_work.mdev = mdev;
1942
Philipp Reisnerb411b362009-09-25 16:07:19 -07001943 init_timer(&mdev->resync_timer);
1944 init_timer(&mdev->md_sync_timer);
Philipp Reisner370a43e2011-01-14 16:03:11 +01001945 init_timer(&mdev->start_resync_timer);
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001946 init_timer(&mdev->request_timer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001947 mdev->resync_timer.function = resync_timer_fn;
1948 mdev->resync_timer.data = (unsigned long) mdev;
1949 mdev->md_sync_timer.function = md_sync_timer_fn;
1950 mdev->md_sync_timer.data = (unsigned long) mdev;
Philipp Reisner370a43e2011-01-14 16:03:11 +01001951 mdev->start_resync_timer.function = start_resync_timer_fn;
1952 mdev->start_resync_timer.data = (unsigned long) mdev;
Philipp Reisner7fde2be2011-03-01 11:08:28 +01001953 mdev->request_timer.function = request_timer_fn;
1954 mdev->request_timer.data = (unsigned long) mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001955
1956 init_waitqueue_head(&mdev->misc_wait);
1957 init_waitqueue_head(&mdev->state_wait);
1958 init_waitqueue_head(&mdev->ee_wait);
1959 init_waitqueue_head(&mdev->al_wait);
1960 init_waitqueue_head(&mdev->seq_wait);
1961
Philipp Reisnerfd340c12011-01-19 16:57:39 +01001962 /* mdev->tconn->agreed_pro_version gets initialized in drbd_connect() */
Philipp Reisner2451fc32010-08-24 13:43:11 +02001963 mdev->write_ordering = WO_bdev_flush;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001964 mdev->resync_wenr = LC_FREE;
Philipp Reisner99432fc2011-05-20 16:39:13 +02001965 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1966 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001967}
1968
1969void drbd_mdev_cleanup(struct drbd_conf *mdev)
1970{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001971 int i;
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001972 if (mdev->tconn->receiver.t_state != NONE)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001973 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Philipp Reisnere6b3ea82011-01-19 14:02:01 +01001974 mdev->tconn->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001975
1976 /* no need to lock it, I'm the only thread alive */
1977 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
1978 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
1979 mdev->al_writ_cnt =
1980 mdev->bm_writ_cnt =
1981 mdev->read_cnt =
1982 mdev->recv_cnt =
1983 mdev->send_cnt =
1984 mdev->writ_cnt =
1985 mdev->p_size =
1986 mdev->rs_start =
1987 mdev->rs_total =
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001988 mdev->rs_failed = 0;
1989 mdev->rs_last_events = 0;
Lars Ellenberg0f0601f2010-08-11 23:40:24 +02001990 mdev->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001991 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1992 mdev->rs_mark_left[i] = 0;
1993 mdev->rs_mark_time[i] = 0;
1994 }
Philipp Reisner89e58e72011-01-19 13:12:45 +01001995 D_ASSERT(mdev->tconn->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001996
1997 drbd_set_my_capacity(mdev, 0);
1998 if (mdev->bitmap) {
1999 /* maybe never allocated. */
Philipp Reisner02d9a942010-03-24 16:23:03 +01002000 drbd_bm_resize(mdev, 0, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002001 drbd_bm_cleanup(mdev);
2002 }
2003
2004 drbd_free_resources(mdev);
Philipp Reisner07782862010-08-31 12:00:50 +02002005 clear_bit(AL_SUSPENDED, &mdev->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002006
2007 /*
2008 * currently we drbd_init_ee only on module load, so
2009 * we may do drbd_release_ee only on module unload!
2010 */
2011 D_ASSERT(list_empty(&mdev->active_ee));
2012 D_ASSERT(list_empty(&mdev->sync_ee));
2013 D_ASSERT(list_empty(&mdev->done_ee));
2014 D_ASSERT(list_empty(&mdev->read_ee));
2015 D_ASSERT(list_empty(&mdev->net_ee));
2016 D_ASSERT(list_empty(&mdev->resync_reads));
Philipp Reisnere42325a2011-01-19 13:55:45 +01002017 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2018 D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002019 D_ASSERT(list_empty(&mdev->resync_work.list));
2020 D_ASSERT(list_empty(&mdev->unplug_work.list));
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02002021 D_ASSERT(list_empty(&mdev->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01002022
2023 drbd_set_defaults(mdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002024}
2025
2026
2027static void drbd_destroy_mempools(void)
2028{
2029 struct page *page;
2030
2031 while (drbd_pp_pool) {
2032 page = drbd_pp_pool;
2033 drbd_pp_pool = (struct page *)page_private(page);
2034 __free_page(page);
2035 drbd_pp_vacant--;
2036 }
2037
2038 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2039
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002040 if (drbd_md_io_bio_set)
2041 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg35abf592011-02-23 12:39:46 +01002042 if (drbd_md_io_page_pool)
2043 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002044 if (drbd_ee_mempool)
2045 mempool_destroy(drbd_ee_mempool);
2046 if (drbd_request_mempool)
2047 mempool_destroy(drbd_request_mempool);
2048 if (drbd_ee_cache)
2049 kmem_cache_destroy(drbd_ee_cache);
2050 if (drbd_request_cache)
2051 kmem_cache_destroy(drbd_request_cache);
2052 if (drbd_bm_ext_cache)
2053 kmem_cache_destroy(drbd_bm_ext_cache);
2054 if (drbd_al_ext_cache)
2055 kmem_cache_destroy(drbd_al_ext_cache);
2056
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002057 drbd_md_io_bio_set = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002058 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002059 drbd_ee_mempool = NULL;
2060 drbd_request_mempool = NULL;
2061 drbd_ee_cache = NULL;
2062 drbd_request_cache = NULL;
2063 drbd_bm_ext_cache = NULL;
2064 drbd_al_ext_cache = NULL;
2065
2066 return;
2067}
2068
2069static int drbd_create_mempools(void)
2070{
2071 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002072 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002073 int i;
2074
2075 /* prepare our caches and mempools */
2076 drbd_request_mempool = NULL;
2077 drbd_ee_cache = NULL;
2078 drbd_request_cache = NULL;
2079 drbd_bm_ext_cache = NULL;
2080 drbd_al_ext_cache = NULL;
2081 drbd_pp_pool = NULL;
Lars Ellenberg35abf592011-02-23 12:39:46 +01002082 drbd_md_io_page_pool = NULL;
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002083 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002084
2085 /* caches */
2086 drbd_request_cache = kmem_cache_create(
2087 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2088 if (drbd_request_cache == NULL)
2089 goto Enomem;
2090
2091 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002092 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002093 if (drbd_ee_cache == NULL)
2094 goto Enomem;
2095
2096 drbd_bm_ext_cache = kmem_cache_create(
2097 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2098 if (drbd_bm_ext_cache == NULL)
2099 goto Enomem;
2100
2101 drbd_al_ext_cache = kmem_cache_create(
2102 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2103 if (drbd_al_ext_cache == NULL)
2104 goto Enomem;
2105
2106 /* mempools */
Lars Ellenbergda4a75d2011-02-23 17:02:01 +01002107 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2108 if (drbd_md_io_bio_set == NULL)
2109 goto Enomem;
2110
Lars Ellenberg35abf592011-02-23 12:39:46 +01002111 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2112 if (drbd_md_io_page_pool == NULL)
2113 goto Enomem;
2114
Philipp Reisnerb411b362009-09-25 16:07:19 -07002115 drbd_request_mempool = mempool_create(number,
2116 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2117 if (drbd_request_mempool == NULL)
2118 goto Enomem;
2119
2120 drbd_ee_mempool = mempool_create(number,
2121 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002122 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002123 goto Enomem;
2124
2125 /* drbd's page pool */
2126 spin_lock_init(&drbd_pp_lock);
2127
2128 for (i = 0; i < number; i++) {
2129 page = alloc_page(GFP_HIGHUSER);
2130 if (!page)
2131 goto Enomem;
2132 set_page_private(page, (unsigned long)drbd_pp_pool);
2133 drbd_pp_pool = page;
2134 }
2135 drbd_pp_vacant = number;
2136
2137 return 0;
2138
2139Enomem:
2140 drbd_destroy_mempools(); /* in case we allocated some */
2141 return -ENOMEM;
2142}
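/* Back-of-the-envelope sizing (illustrative values): with 4 KiB pages
 * and a DRBD_MAX_BIO_SIZE of, say, 128 KiB, each configured minor
 * contributes 32 pages, so minor_count = 32 preallocates 1024 pages
 * (4 MiB) for drbd_pp_pool; the same `number' also sizes the request
 * and EE mempools (in objects, not pages). */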
2143
2144static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2145 void *unused)
2146{
2147 /* just so we have it. you never know what interesting things we
2148 * might want to do here some day...
2149 */
2150
2151 return NOTIFY_DONE;
2152}
2153
2154static struct notifier_block drbd_notifier = {
2155 .notifier_call = drbd_notify_sys,
2156};
2157
2158static void drbd_release_ee_lists(struct drbd_conf *mdev)
2159{
2160 int rr;
2161
2162 rr = drbd_release_ee(mdev, &mdev->active_ee);
2163 if (rr)
2164 dev_err(DEV, "%d EEs in active list found!\n", rr);
2165
2166 rr = drbd_release_ee(mdev, &mdev->sync_ee);
2167 if (rr)
2168 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2169
2170 rr = drbd_release_ee(mdev, &mdev->read_ee);
2171 if (rr)
2172 dev_err(DEV, "%d EEs in read list found!\n", rr);
2173
2174 rr = drbd_release_ee(mdev, &mdev->done_ee);
2175 if (rr)
2176 dev_err(DEV, "%d EEs in done list found!\n", rr);
2177
2178 rr = drbd_release_ee(mdev, &mdev->net_ee);
2179 if (rr)
2180 dev_err(DEV, "%d EEs in net list found!\n", rr);
2181}
2182
Philipp Reisner774b3052011-02-22 02:07:03 -05002183/* caution. no locking. */
2184void drbd_delete_device(unsigned int minor)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185{
2186 struct drbd_conf *mdev = minor_to_mdev(minor);
2187
2188 if (!mdev)
2189 return;
2190
Lars Ellenberg569083c2011-03-07 09:49:02 +01002191 idr_remove(&mdev->tconn->volumes, mdev->vnr);
2192 idr_remove(&minors, minor);
2193 synchronize_rcu();
Philipp Reisner774b3052011-02-22 02:07:03 -05002194
Philipp Reisnerb411b362009-09-25 16:07:19 -07002195 /* paranoia asserts */
Andreas Gruenbacher70dc65e2010-12-21 14:46:57 +01002196 D_ASSERT(mdev->open_cnt == 0);
Philipp Reisnere42325a2011-01-19 13:55:45 +01002197 D_ASSERT(list_empty(&mdev->tconn->data.work.q));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002198 /* end paranoia asserts */
2199
2200 del_gendisk(mdev->vdisk);
2201
2202 /* cleanup stuff that may have been allocated during
2203 * device (re-)configuration or state changes */
2204
2205 if (mdev->this_bdev)
2206 bdput(mdev->this_bdev);
2207
2208 drbd_free_resources(mdev);
2209
2210 drbd_release_ee_lists(mdev);
2211
Philipp Reisnerb411b362009-09-25 16:07:19 -07002212 lc_destroy(mdev->act_log);
2213 lc_destroy(mdev->resync);
2214
2215 kfree(mdev->p_uuid);
2216 /* mdev->p_uuid = NULL; */
2217
Philipp Reisnerb411b362009-09-25 16:07:19 -07002218 /* cleanup the rest that has been
2219 * allocated from drbd_new_device
2220 * and actually free the mdev itself */
2221 drbd_free_mdev(mdev);
2222}
2223
2224static void drbd_cleanup(void)
2225{
2226 unsigned int i;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002227 struct drbd_conf *mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002228
2229 unregister_reboot_notifier(&drbd_notifier);
2230
Lars Ellenberg17a93f32010-11-24 10:37:35 +01002231 /* first remove proc,
 2232	 * drbdsetup uses its presence to detect
 2233	 * whether DRBD is loaded.
 2234	 * If we were to get stuck in proc removal
 2235	 * with netlink already deregistered,
2236 * some drbdsetup commands may wait forever
2237 * for an answer.
2238 */
2239 if (drbd_proc)
2240 remove_proc_entry("drbd", NULL);
2241
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002242 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002243
Philipp Reisner81a5d602011-02-22 19:53:16 -05002244 idr_for_each_entry(&minors, mdev, i)
2245 drbd_delete_device(i);
2246 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002247 unregister_blkdev(DRBD_MAJOR, "drbd");
2248
Philipp Reisner81a5d602011-02-22 19:53:16 -05002249 idr_destroy(&minors);
2250
Philipp Reisnerb411b362009-09-25 16:07:19 -07002251 printk(KERN_INFO "drbd: module cleanup done.\n");
2252}
2253
2254/**
2255 * drbd_congested() - Callback for pdflush
2256 * @congested_data: User data
2257 * @bdi_bits: Bits pdflush is currently interested in
2258 *
2259 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2260 */
2261static int drbd_congested(void *congested_data, int bdi_bits)
2262{
2263 struct drbd_conf *mdev = congested_data;
2264 struct request_queue *q;
2265 char reason = '-';
2266 int r = 0;
2267
Andreas Gruenbacher1b881ef2010-12-13 18:03:38 +01002268 if (!may_inc_ap_bio(mdev)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002269 /* DRBD has frozen IO */
2270 r = bdi_bits;
2271 reason = 'd';
2272 goto out;
2273 }
2274
2275 if (get_ldev(mdev)) {
2276 q = bdev_get_queue(mdev->ldev->backing_bdev);
2277 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2278 put_ldev(mdev);
2279 if (r)
2280 reason = 'b';
2281 }
2282
Philipp Reisner01a311a2011-02-07 14:30:33 +01002283 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002284 r |= (1 << BDI_async_congested);
2285 reason = reason == 'b' ? 'a' : 'n';
2286 }
2287
2288out:
2289 mdev->congestion_reason = reason;
2290 return r;
2291}
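/* The single character left in mdev->congestion_reason decodes as:
 * 'd' = DRBD itself has frozen IO, 'b' = the backing device is
 * congested, 'a' = backing device and network both congested,
 * 'n' = network (data socket) congestion only, '-' = not congested. */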
2292
Philipp Reisner6699b652011-02-09 11:10:24 +01002293static void drbd_init_workqueue(struct drbd_work_queue* wq)
2294{
2295 sema_init(&wq->s, 0);
2296 spin_lock_init(&wq->q_lock);
2297 INIT_LIST_HEAD(&wq->q);
2298}
2299
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002300struct drbd_tconn *conn_by_name(const char *name)
2301{
2302 struct drbd_tconn *tconn;
2303
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002304 if (!name || !name[0])
2305 return NULL;
2306
Lars Ellenberg543cc102011-03-10 22:18:18 +01002307 mutex_lock(&drbd_cfg_mutex);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002308 list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2309 if (!strcmp(tconn->name, name))
2310 goto found;
2311 }
2312 tconn = NULL;
2313found:
Lars Ellenberg543cc102011-03-10 22:18:18 +01002314 mutex_unlock(&drbd_cfg_mutex);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002315 return tconn;
2316}
2317
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002318static int drbd_alloc_socket(struct drbd_socket *socket)
2319{
2320 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2321 if (!socket->rbuf)
2322 return -ENOMEM;
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002323 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2324 if (!socket->sbuf)
2325 return -ENOMEM;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002326 return 0;
2327}
2328
2329static void drbd_free_socket(struct drbd_socket *socket)
2330{
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002331 free_page((unsigned long) socket->sbuf);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002332 free_page((unsigned long) socket->rbuf);
2333}
2334
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002335struct drbd_tconn *drbd_new_tconn(const char *name)
Philipp Reisner21114382011-01-19 12:26:59 +01002336{
2337 struct drbd_tconn *tconn;
2338
2339 tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
2340 if (!tconn)
2341 return NULL;
2342
2343 tconn->name = kstrdup(name, GFP_KERNEL);
2344 if (!tconn->name)
2345 goto fail;
2346
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002347 if (drbd_alloc_socket(&tconn->data))
2348 goto fail;
2349 if (drbd_alloc_socket(&tconn->meta))
2350 goto fail;
2351
Philipp Reisner774b3052011-02-22 02:07:03 -05002352 if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
2353 goto fail;
2354
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002355 if (!tl_init(tconn))
2356 goto fail;
2357
Philipp Reisnerbbeb6412011-02-10 13:45:46 +01002358 tconn->cstate = C_STANDALONE;
Philipp Reisner8410da8f02011-02-11 20:11:10 +01002359 mutex_init(&tconn->cstate_mutex);
Philipp Reisner6699b652011-02-09 11:10:24 +01002360 spin_lock_init(&tconn->req_lock);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002361 atomic_set(&tconn->net_cnt, 0);
2362 init_waitqueue_head(&tconn->net_cnt_wait);
Philipp Reisner2a67d8b2011-02-09 14:10:32 +01002363 init_waitqueue_head(&tconn->ping_wait);
Philipp Reisner062e8792011-02-08 11:09:18 +01002364 idr_init(&tconn->volumes);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002365
Philipp Reisner6699b652011-02-09 11:10:24 +01002366 drbd_init_workqueue(&tconn->data.work);
2367 mutex_init(&tconn->data.mutex);
2368
2369 drbd_init_workqueue(&tconn->meta.work);
2370 mutex_init(&tconn->meta.mutex);
2371
Philipp Reisner392c8802011-02-09 10:33:31 +01002372 drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
2373 drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
2374 drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
2375
Lars Ellenbergf3990022011-03-23 14:31:09 +01002376 tconn->res_opts = (struct res_opts) {
2377 {}, 0, /* cpu_mask */
2378 DRBD_ON_NO_DATA_DEF, /* on_no_data */
2379 };
2380
Lars Ellenberg543cc102011-03-10 22:18:18 +01002381 mutex_lock(&drbd_cfg_mutex);
2382 list_add_tail(&tconn->all_tconn, &drbd_tconns);
2383 mutex_unlock(&drbd_cfg_mutex);
Philipp Reisner21114382011-01-19 12:26:59 +01002384
2385 return tconn;
2386
2387fail:
Philipp Reisner2f5cdd02011-02-21 14:29:27 +01002388 tl_cleanup(tconn);
Philipp Reisner774b3052011-02-22 02:07:03 -05002389 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002390 drbd_free_socket(&tconn->meta);
2391 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002392 kfree(tconn->name);
2393 kfree(tconn);
2394
2395 return NULL;
2396}
2397
2398void drbd_free_tconn(struct drbd_tconn *tconn)
2399{
Philipp Reisner21114382011-01-19 12:26:59 +01002400 list_del(&tconn->all_tconn);
Philipp Reisner062e8792011-02-08 11:09:18 +01002401 idr_destroy(&tconn->volumes);
Philipp Reisner21114382011-01-19 12:26:59 +01002402
Philipp Reisner774b3052011-02-22 02:07:03 -05002403 free_cpumask_var(tconn->cpu_mask);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002404 drbd_free_socket(&tconn->meta);
2405 drbd_free_socket(&tconn->data);
Philipp Reisner21114382011-01-19 12:26:59 +01002406 kfree(tconn->name);
Philipp Reisnerb42a70a2011-01-27 10:55:20 +01002407 kfree(tconn->int_dig_in);
2408 kfree(tconn->int_dig_vv);
Philipp Reisner21114382011-01-19 12:26:59 +01002409 kfree(tconn);
2410}
2411
Philipp Reisner774b3052011-02-22 02:07:03 -05002412enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002413{
2414 struct drbd_conf *mdev;
2415 struct gendisk *disk;
2416 struct request_queue *q;
Philipp Reisner774b3052011-02-22 02:07:03 -05002417 int vnr_got = vnr;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002418 int minor_got = minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002419 enum drbd_ret_code err = ERR_NOMEM;
Philipp Reisner774b3052011-02-22 02:07:03 -05002420
2421 mdev = minor_to_mdev(minor);
2422 if (mdev)
2423 return ERR_MINOR_EXISTS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002424
2425 /* GFP_KERNEL, we are outside of all write-out paths */
2426 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2427 if (!mdev)
Philipp Reisner774b3052011-02-22 02:07:03 -05002428 return ERR_NOMEM;
2429
2430 mdev->tconn = tconn;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002431 mdev->minor = minor;
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002432 mdev->vnr = vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002433
2434 drbd_init_set_defaults(mdev);
2435
2436 q = blk_alloc_queue(GFP_KERNEL);
2437 if (!q)
2438 goto out_no_q;
2439 mdev->rq_queue = q;
2440 q->queuedata = mdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002441
2442 disk = alloc_disk(1);
2443 if (!disk)
2444 goto out_no_disk;
2445 mdev->vdisk = disk;
2446
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002447 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002448
2449 disk->queue = q;
2450 disk->major = DRBD_MAJOR;
2451 disk->first_minor = minor;
2452 disk->fops = &drbd_ops;
2453 sprintf(disk->disk_name, "drbd%d", minor);
2454 disk->private_data = mdev;
2455
2456 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2457 /* we have no partitions. we contain only ourselves. */
2458 mdev->this_bdev->bd_contains = mdev->this_bdev;
2459
2460 q->backing_dev_info.congested_fn = drbd_congested;
2461 q->backing_dev_info.congested_data = mdev;
2462
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01002463 blk_queue_make_request(q, drbd_make_request);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002464	/* Setting the max_hw_sectors to an odd value of 8 KiByte here;
 2465	   this triggers a max_bio_size message upon first attach or connect. */
2466 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002467 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2468 blk_queue_merge_bvec(q, drbd_merge_bvec);
Philipp Reisner87eeee42011-01-19 14:16:30 +01002469 q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002470
2471 mdev->md_io_page = alloc_page(GFP_KERNEL);
2472 if (!mdev->md_io_page)
2473 goto out_no_io_page;
2474
2475 if (drbd_bm_init(mdev))
2476 goto out_no_bitmap;
Andreas Gruenbacherdac13892011-01-21 17:18:39 +01002477 mdev->read_requests = RB_ROOT;
Andreas Gruenbacherde696712011-01-20 15:00:24 +01002478 mdev->write_requests = RB_ROOT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002479
Philipp Reisnerb411b362009-09-25 16:07:19 -07002480 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2481 if (!mdev->current_epoch)
2482 goto out_no_epoch;
2483
2484 INIT_LIST_HEAD(&mdev->current_epoch->list);
2485 mdev->epochs = 1;
2486
Lars Ellenberg8432b312011-03-08 16:11:16 +01002487 if (!idr_pre_get(&minors, GFP_KERNEL))
2488 goto out_no_minor_idr;
2489 if (idr_get_new_above(&minors, mdev, minor, &minor_got))
2490 goto out_no_minor_idr;
2491 if (minor_got != minor) {
2492 err = ERR_MINOR_EXISTS;
2493 drbd_msg_put_info("requested minor exists already");
2494 goto out_idr_remove_minor;
Lars Ellenberg569083c2011-03-07 09:49:02 +01002495 }
2496
Lars Ellenberg8432b312011-03-08 16:11:16 +01002497 if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
Lars Ellenberg569083c2011-03-07 09:49:02 +01002498 goto out_idr_remove_minor;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002499 if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
2500 goto out_idr_remove_minor;
2501 if (vnr_got != vnr) {
2502 err = ERR_INVALID_REQUEST;
2503 drbd_msg_put_info("requested volume exists already");
2504 goto out_idr_remove_vol;
Philipp Reisner81a5d602011-02-22 19:53:16 -05002505 }
Philipp Reisner774b3052011-02-22 02:07:03 -05002506 add_disk(disk);
2507
Philipp Reisner2325eb62011-03-15 16:56:18 +01002508 /* inherit the connection state */
2509 mdev->state.conn = tconn->cstate;
2510 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2511 drbd_connected(vnr, mdev, tconn);
2512
Philipp Reisner774b3052011-02-22 02:07:03 -05002513 return NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002514
Lars Ellenberg569083c2011-03-07 09:49:02 +01002515out_idr_remove_vol:
2516 idr_remove(&tconn->volumes, vnr_got);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002517out_idr_remove_minor:
2518 idr_remove(&minors, minor_got);
Lars Ellenberg569083c2011-03-07 09:49:02 +01002519 synchronize_rcu();
Lars Ellenberg8432b312011-03-08 16:11:16 +01002520out_no_minor_idr:
Philipp Reisner81a5d602011-02-22 19:53:16 -05002521 kfree(mdev->current_epoch);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002522out_no_epoch:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002523 drbd_bm_cleanup(mdev);
2524out_no_bitmap:
2525 __free_page(mdev->md_io_page);
2526out_no_io_page:
2527 put_disk(disk);
2528out_no_disk:
2529 blk_cleanup_queue(q);
2530out_no_q:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002531 kfree(mdev);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002532 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002533}
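/*
 * Illustrative caller sketch (an assumption, not code from this driver):
 * creating volume 0 as minor 5 of an existing connection and passing the
 * result back as a netlink reply would look roughly like
 *
 *	enum drbd_ret_code ret = conn_new_minor(tconn, 5, 0);
 *	if (ret != NO_ERROR)
 *		return ret;	// e.g. ERR_MINOR_EXISTS or ERR_NOMEM
 *
 * Note how the out_* labels above unwind in exactly the reverse order of
 * the allocations, so each label releases only what was set up before
 * the failing step.
 */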

/* counterpart of conn_new_minor (named drbd_new_device in earlier code);
 * last part of drbd_delete_device. */
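/* Releases resources in the reverse order of their allocation in
 * conn_new_minor() above. */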
void drbd_free_mdev(struct drbd_conf *mdev)
{
	kfree(mdev->current_epoch);
	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	kfree(mdev);
}


int __init drbd_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct p_header80) != sizeof(struct p_header95));
	BUILD_BUG_ON(sizeof(struct p_connection_features) != 80);

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = 8;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
	       DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}
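/*
 * Illustrative load-time behaviour (a sketch, not part of this file):
 * given the minor_count bounds check above, a modular build behaves
 * roughly like
 *
 *	# modprobe drbd minor_count=16			(accepted)
 *	# modprobe drbd minor_count=<out of range>	(fails with -EINVAL)
 *
 * whereas a built-in driver silently falls back to minor_count = 8
 * instead of failing the boot.
 */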

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}


void drbd_free_resources(struct drbd_conf *mdev)
{
	crypto_free_hash(mdev->tconn->csums_tfm);
	mdev->tconn->csums_tfm = NULL;
	crypto_free_hash(mdev->tconn->verify_tfm);
	mdev->tconn->verify_tfm = NULL;
	crypto_free_hash(mdev->tconn->cram_hmac_tfm);
	mdev->tconn->cram_hmac_tfm = NULL;
	crypto_free_hash(mdev->tconn->integrity_w_tfm);
	mdev->tconn->integrity_w_tfm = NULL;
	crypto_free_hash(mdev->tconn->integrity_r_tfm);
	mdev->tconn->integrity_r_tfm = NULL;

	drbd_free_sock(mdev->tconn);

	__no_warn(local,
		  drbd_free_bc(mdev->ldev);
		  mdev->ldev = NULL;);
}

/* meta data management */

struct meta_data_on_disk {
	u64 la_size;		/* last agreed size. */
	u64 uuid[UI_SIZE];	/* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;		/* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;		/* offset to this block */
	u32 al_nr_extents;	/* important for restoring the AL */
		/* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;		/* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;	/* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;	/* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;
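/*
 * Size sketch (assuming UI_SIZE == 4, as in the UUID index enum):
 * 7 u64 fields (56 bytes) plus 11 u32 fields (44 bytes) make this
 * structure 100 bytes.  drbd_md_sync() below zeroes a full 512-byte
 * sector before filling it in, so the on-disk super block is effectively
 * the first 100 bytes of that sector, stored big-endian.
 */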

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use D_FAILED here and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);

	buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, true);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);
}
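/*
 * Note (illustrative): every multi-byte field above goes through
 * cpu_to_be32()/cpu_to_be64(), and drbd_md_read() below undoes that
 * with be32_to_cpu()/be64_to_cpu(), e.g.
 *
 *	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);	  // write side
 *	if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC))  // read side check
 *
 * so the on-disk format stays byte-order independent across nodes.
 */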

/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
		dev_err(DEV, "Error while reading metadata, magic not found.\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
			be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
			be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->dc.al_extents = be32_to_cpu(buffer->al_nr_extents);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (bdev->dc.al_extents < 7)
		bdev->dc.al_extents = 127;

 err:
	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY and starts a
 * timer that ensures drbd_md_sync() gets called within five seconds.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
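/*
 * Typical pattern (shown for illustration; the flag helpers further down
 * do exactly this): mutate the in-core super block, mark it dirty, and
 * let the timer/worker pair flush it:
 *
 *	mdev->ldev->md.flags |= MDF_FULL_SYNC;
 *	drbd_md_mark_dirty(mdev);
 *	...
 *	drbd_md_sync(mdev);	// or wait for md_sync_timer -> w_md_sync
 */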

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}
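/*
 * The least significant bit of the current UUID encodes the role at the
 * time it was set, e.g. (illustrative values):
 *
 *	u64 val = 0x1234567890abcdeeULL;
 *	_drbd_uuid_set(mdev, UI_CURRENT, val);
 *	// stored as ...cdef on a primary (bit 0 forced on),
 *	// stored as ...cdee on a secondary (bit 0 forced off)
 */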


void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	_drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	drbd_md_mark_dirty(mdev);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}
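/*
 * Hypothetical call-site sketch (the reason string is made up for
 * illustration): a full-sync request from outside worker context might
 * run this io_fn through the synchronous wrapper further down, e.g.
 *
 *	int rv = drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *				"set_n_write from invalidate",
 *				BM_LOCKED_SET_ALLOWED);
 *
 * MDF_FULL_SYNC is persisted *before* the bitmap write, so a crash in
 * between still leads to a full sync after the next attach.
 */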

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_conf *mdev = w->mdev;
	int rv = -EIO;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	if (get_ldev(mdev)) {
		drbd_bm_lock(mdev, work->why, work->flags);
		rv = work->io_fn(mdev);
		drbd_bm_unlock(mdev);
		put_ldev(mdev);
	}

	clear_bit_unlock(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		  drbd_free_bc(mdev->ldev);
		  mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag), passed on to drbd_bm_lock()
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}
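/*
 * Worker-context usage sketch (the done callback and reason string are
 * invented for illustration):
 *
 *	static void my_bitmap_done(struct drbd_conf *mdev, int rv) { ... }
 *	...
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_clear_n_write, my_bitmap_done,
 *			     "clear_n_write after resync",
 *			     BM_LOCKED_SET_ALLOWED);
 *
 * The io_fn then runs from w_bitmap_io() above once application IO
 * (ap_bio_cnt) has drained to zero; the done callback receives the
 * io_fn's return value.
 */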

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag), passed on to drbd_bm_lock()
 *
 * Freezes application IO (unless @flags contains BM_LOCKED_SET_ALLOWED)
 * while the actual IO operation runs. This function MAY NOT be called
 * from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		   char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}
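/*
 * Synchronous usage sketch (reason string invented for illustration):
 *
 *	int rv = drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *				"set_n_write from attaching",
 *				BM_LOCKED_SET_ALLOWED);
 *
 * In contrast to drbd_queue_bitmap_io() above, this runs io_fn in the
 * caller's context and returns its result directly; with
 * BM_LOCKED_SET_ALLOWED set, the suspend/resume of application IO is
 * skipped.
 */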

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}
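/*
 * Example (illustrative, mirroring drbd_bmio_set_n_write() above): a
 * crash-safe "full sync pending" marker is kept this way:
 *
 *	drbd_md_set_flag(mdev, MDF_FULL_SYNC);
 *	drbd_md_sync(mdev);
 *	...					// do the bitmap write
 *	drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
 *	drbd_md_sync(mdev);
 *
 * and drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) later tells whether
 * the work completed before a crash.
 */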

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 0;
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA] = "Data",
		[P_DATA_REPLY] = "DataReply",
		[P_RS_DATA_REPLY] = "RSDataReply",
		[P_BARRIER] = "Barrier",
		[P_BITMAP] = "ReportBitMap",
		[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
		[P_UNPLUG_REMOTE] = "UnplugRemote",
		[P_DATA_REQUEST] = "DataRequest",
		[P_RS_DATA_REQUEST] = "RSDataRequest",
		[P_SYNC_PARAM] = "SyncParam",
		[P_SYNC_PARAM89] = "SyncParam89",
		[P_PROTOCOL] = "ReportProtocol",
		[P_UUIDS] = "ReportUUIDs",
		[P_SIZES] = "ReportSizes",
		[P_STATE] = "ReportState",
		[P_SYNC_UUID] = "ReportSyncUUID",
		[P_AUTH_CHALLENGE] = "AuthChallenge",
		[P_AUTH_RESPONSE] = "AuthResponse",
		[P_PING] = "Ping",
		[P_PING_ACK] = "PingAck",
		[P_RECV_ACK] = "RecvAck",
		[P_WRITE_ACK] = "WriteAck",
		[P_RS_WRITE_ACK] = "RSWriteAck",
		[P_DISCARD_WRITE] = "DiscardWrite",
		[P_NEG_ACK] = "NegAck",
		[P_NEG_DREPLY] = "NegDReply",
		[P_NEG_RS_DREPLY] = "NegRSDReply",
		[P_BARRIER_ACK] = "BarrierAck",
		[P_STATE_CHG_REQ] = "StateChgRequest",
		[P_STATE_CHG_REPLY] = "StateChgReply",
		[P_OV_REQUEST] = "OVRequest",
		[P_OV_REPLY] = "OVReply",
		[P_OV_RESULT] = "OVResult",
		[P_CSUM_RS_REQUEST] = "CsumRSRequest",
		[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP] = "CBitmap",
		[P_DELAY_PROBE] = "DelayProbe",
		[P_OUT_OF_SYNC] = "OutOfSync",
		[P_RETRY_WRITE] = "RetryWrite",
	};

	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}

/**
 * drbd_wait_misc() - wait for a request to make progress
 * @mdev:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *net_conf = mdev->tconn->net_conf;
	DEFINE_WAIT(wait);
	long timeout;

	if (!net_conf)
		return -ETIMEDOUT;
	timeout = MAX_SCHEDULE_TIMEOUT;
	if (net_conf->ko_count)
		timeout = net_conf->timeout * HZ / 10 * net_conf->ko_count;

	/* Indicate to wake up mdev->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
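/*
 * Locking note (inferred from the unlock/lock pair above): the caller
 * must hold mdev->tconn->req_lock on entry; the lock is dropped while
 * sleeping and re-acquired before returning, so any state examined
 * under the lock must be revalidated by the caller afterwards.
 */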

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701  /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator. Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				 _drbd_fault_str(type));
	}

	return ret;
}
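/*
 * Reading the predicate above: fault_rate is a percentage (the random
 * draw is mapped to 1..100) and fault_devs a bitmask of minor numbers,
 * with 0 meaning "all devices". A hypothetical configuration of
 * fault_rate=10, fault_devs=0x5 would therefore inject roughly 10%
 * failures, on minors 0 and 2 only.
 */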
#endif

const char *drbd_buildtag(void)
{
	/* DRBD built from external sources carries here a reference to
	   the git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);