/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details; /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
struct list_head drbd_tconns;  /* list of struct drbd_tconn */

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
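
/* Editor's illustration, not driver code: with the next pointer kept in
 * page->private, push and pop on the pool head (under drbd_pp_lock) come
 * down to something like
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);
 *	drbd_pp_pool = page;					// push
 *
 *	page = drbd_pp_pool;
 *	drbd_pp_pool = (struct page *)page_private(page);	// pop
 *
 * The actual allocation and free paths live elsewhere in the driver.
 */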
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @tconn:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&tconn->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&tconn->req_lock);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
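
/* Editor's note, by way of example: if the peer sends a BarrierAck for
 * barrier #7 with set_size == 3, tl_release() above expects the oldest not
 * yet barrier-acked epoch in the transfer log to carry epoch number 7 and
 * to contain exactly three network write requests; any mismatch is treated
 * as a protocol error and the connection is torn down via C_PROTOCOL_ERROR.
 */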


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
 * @mdev:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;
	struct drbd_request *req, *r;

	spin_lock_irq(&tconn->req_lock);
	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->w.mdev != mdev)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			 thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size - size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
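
/* Editor's sketch of the on-the-wire header layouts selected above; the
 * field widths are read off the cpu_to_be*() conversions, the authoritative
 * struct definitions live in drbd_int.h:
 *
 *	p_header80:  u32 magic, u16 command, u16 length           ->  8 bytes
 *	p_header95:  u16 magic, u16 command, u32 length           ->  8 bytes
 *	p_header100: u32 magic, u16 volume, u16 command,
 *	             u32 length, u32 pad                          -> 16 bytes
 *
 * p_header95 exists because a 16 bit length field cannot describe payloads
 * larger than DRBD_MAX_SIZE_H80_PACKET.
 */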

static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
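
/* The typical calling pattern for the helpers above (editor's sketch;
 * "some_field" and P_SOME_CMD are placeholders, the real senders follow
 * below): *_prepare_command() takes sock->mutex and returns a pointer just
 * behind the reserved header, *_send_command() fills in the header, sends,
 * and drops the mutex again.
 *
 *	p = drbd_prepare_command(mdev, sock);
 *	if (!p)
 *		return -EIO;
 *	p->some_field = cpu_to_be32(value);
 *	return drbd_send_command(mdev, sock, P_SOME_CMD, sizeof(*p), NULL, 0);
 */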

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->tentative && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn, P_PROTOCOL);
	mutex_unlock(&tconn->data.mutex);

	return err;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	spin_lock_irq(&mdev->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}
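
/* Editor's summary of the uuid_flags bits assembled in _drbd_send_uuids()
 * and by its callers above:
 *	1: this node is configured to discard its own data
 *	   (net_conf->discard_my_data)
 *	2: this node was a crashed primary (CRASHED_PRIMARY)
 *	4: the local disk is becoming D_INCONSISTENT
 *	8: skip the initial sync (drbd_send_uuids_skip_initial_sync)
 */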

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
			 text,
			 (unsigned long long)uuid[UI_CURRENT],
			 (unsigned long long)uuid[UI_BITMAP],
			 (unsigned long long)uuid[UI_HISTORY_START],
			 (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
			 text,
			 (unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	if (mdev->tconn->agreed_pro_version <= 94)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (mdev->tconn->agreed_pro_version < 100)
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_current_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @mdev:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
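
/* The helpers above pack three fields into the single "encoding" byte of
 * struct p_compressed_bm (editor's summary, read off the masks and shifts;
 * see drbd_int.h for the struct itself):
 *	bit  7   : value of the first run (dcbp_set_start)
 *	bits 4-6 : number of trailing pad bits (dcbp_set_pad_bits)
 *	bits 0-3 : encoding code, e.g. RLE_VLI_Bits (dcbp_set_code)
 */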

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || mdev->tconn->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
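
/* Worked example (editor's illustration): a bitmap chunk starting with
 * 0000 0111 1... is emitted as the run lengths 5, 4, ... encoded as
 * variable-length integers, with the start flag cleared because the first
 * run consists of zero bits. A chunk whose first bit is set is handled by
 * the toggle == 2 special case above: the start flag is set and the empty
 * zero run is skipped instead of being encoded.
 */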

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

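	/* transfer chunk by chunk: send_bitmap_rle_or_plain() returns 1 while
	 * more bitmap data remains, 0 once the transfer is complete, and a
	 * negative error code on failure */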
	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}

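/* Acknowledge a P_BARRIER on the meta socket: barrier_nr identifies the
 * epoch being acknowledged, set_size the number of write requests it
 * contained. */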
void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (tconn->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in bytes, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return -EIO;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (mdev->tconn->peer_integrity_tfm)
		data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, cmd, sizeof(*p),
				 digest, digest_size);
}

int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}

/* called on sndtimeo
 * returns false if we should retry,
 * true if we think the connection is dead
 */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   tconn->meta.socket == sock
		|| !tconn->asender.task
		|| get_t_state(&tconn->asender) != RUNNING
		|| tconn->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

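	/* each expired send timeout decrements ko_count; only when it
	 * reaches zero do we give up on the connection */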
	drop_it = !--tconn->ko_count;
	if (!drop_it) {
		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, tconn->ko_count);
		request_ping(tconn);
	}

	return drop_it; /* && (mdev->state == R_PRIMARY) */
}

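/* mark the connection as congested once more than 4/5 of the socket
 * send buffer is queued */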
static void drbd_update_congested(struct drbd_tconn *tconn)
{
	struct sock *sk = tconn->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &tconn->flags);
}

/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = mdev->tconn->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		mdev->send_cnt += size >> 9;
	return err;
}

static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = mdev->tconn->data.socket;
	mm_segment_t oldfs = get_fs();
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(mdev->tconn);
	set_fs(KERNEL_DS);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(mdev->tconn, socket))
					break;
				continue;
			}
			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
			     __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &mdev->tconn->flags);

	if (len == 0) {
		err = 0;
		mdev->send_cnt += size >> 9;
	}
	return err;
}

static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		int err;

		err = _drbd_no_send_page(mdev, bvec->bv_page,
					 bvec->bv_offset, bvec->bv_len,
					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, i) {
		int err;

		err = _drbd_send_page(mdev, bvec->bv_page,
				      bvec->bv_offset, bvec->bv_len,
				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
		if (err)
			return err;
	}
	return 0;
}

static int _drbd_send_zc_ee(struct drbd_conf *mdev,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(mdev, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}

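/* translate bio flags to the DP_* flags understood by the peer;
 * peers older than protocol version 95 only know the SYNC hint */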
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
	if (mdev->tconn->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	else
		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}

/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	unsigned int dp_flags = 0;
	int dgs;
	int err;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
	if (mdev->state.conn >= C_SYNC_SOURCE &&
	    mdev->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (mdev->tconn->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		if (req->rq_state & RQ_EXP_WRITE_ACK)
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);
	if (dgs)
		drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we have handed it over to tcp, at which point
		 * the data pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we surely have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
			err = _drbd_send_bio(mdev, req->master_bio);
		else
			err = _drbd_send_zc_bio(mdev, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (dgs > 0 && dgs <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, dgs)) {
				dev_warn(DEV,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (dgs > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

/* answer packet, used to send data back for read requests:
 *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET        (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int dgs;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);

	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (dgs)
		drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(mdev, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}

int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}

/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov;
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	iov.iov_base = buf;
	iov.iov_len  = size;

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	if (sock == tconn->data.socket) {
		rcu_read_lock();
		tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(tconn);
	}
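	/* loop until the whole buffer is handed over to the network stack;
	 * kernel_sendmsg() may accept only part of it per call */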
	do {
		/* STRANGE
		 * tcp_sendmsg does _not_ use its size parameter at all ?
		 *
		 * -EAGAIN on timeout, -EINTR on signal.
		 */
/* THINK
 * do we need to block DRBD_SIG if sock == &meta.socket ??
 * otherwise wake_asender() might interrupt some send_*Ack !
 */
		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(tconn, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
		iov.iov_base += rv;
		iov.iov_len  -= rv;
	} while (sent < size);

	if (sock == tconn->data.socket)
		clear_bit(NET_CONGESTED, &tconn->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			conn_err(tconn, "%s_sendmsg returned %d\n",
				 sock == tconn->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}
1806
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001807/**
1808 * drbd_send_all - Send an entire buffer
1809 *
1810 * Returns 0 upon success and a negative error value otherwise.
1811 */
1812int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1813 size_t size, unsigned msg_flags)
1814{
1815 int err;
1816
1817 err = drbd_send(tconn, sock, buffer, size, msg_flags);
1818 if (err < 0)
1819 return err;
1820 if (err != size)
1821 return -EIO;
1822 return 0;
1823}
1824
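/* opening for write requires the Primary role; read-only opens on a
 * Secondary are only allowed with the allow_oos module parameter */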
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_conf *mdev = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	/* to have a stable mdev->state.role
	 * and no race with updating open_cnt */

	if (mdev->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		mdev->open_cnt++;
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}

static int drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_conf *mdev = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	mdev->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
	return 0;
}

static void drbd_set_defaults(struct drbd_conf *mdev)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	mdev->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}

void drbd_init_set_defaults(struct drbd_conf *mdev)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(mdev);

	atomic_set(&mdev->ap_bio_cnt, 0);
	atomic_set(&mdev->ap_pending_cnt, 0);
	atomic_set(&mdev->rs_pending_cnt, 0);
	atomic_set(&mdev->unacked_cnt, 0);
	atomic_set(&mdev->local_cnt, 0);
	atomic_set(&mdev->pp_in_use_by_net, 0);
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);
	atomic_set(&mdev->ap_in_flight, 0);
	atomic_set(&mdev->md_io_in_use, 0);

	mutex_init(&mdev->own_state_mutex);
	mdev->state_mutex = &mdev->own_state_mutex;

	spin_lock_init(&mdev->al_lock);
	spin_lock_init(&mdev->peer_seq_lock);

	INIT_LIST_HEAD(&mdev->active_ee);
	INIT_LIST_HEAD(&mdev->sync_ee);
	INIT_LIST_HEAD(&mdev->done_ee);
	INIT_LIST_HEAD(&mdev->read_ee);
	INIT_LIST_HEAD(&mdev->net_ee);
	INIT_LIST_HEAD(&mdev->resync_reads);
	INIT_LIST_HEAD(&mdev->resync_work.list);
	INIT_LIST_HEAD(&mdev->unplug_work.list);
	INIT_LIST_HEAD(&mdev->go_diskless.list);
	INIT_LIST_HEAD(&mdev->md_sync_work.list);
	INIT_LIST_HEAD(&mdev->start_resync_work.list);
	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

	mdev->resync_work.cb  = w_resync_timer;
	mdev->unplug_work.cb  = w_send_write_hint;
	mdev->go_diskless.cb  = w_go_diskless;
	mdev->md_sync_work.cb = w_md_sync;
	mdev->bm_io_work.w.cb = w_bitmap_io;
	mdev->start_resync_work.cb = w_start_resync;

	mdev->resync_work.mdev  = mdev;
	mdev->unplug_work.mdev  = mdev;
	mdev->go_diskless.mdev  = mdev;
	mdev->md_sync_work.mdev = mdev;
	mdev->bm_io_work.w.mdev = mdev;
	mdev->start_resync_work.mdev = mdev;

	init_timer(&mdev->resync_timer);
	init_timer(&mdev->md_sync_timer);
	init_timer(&mdev->start_resync_timer);
	init_timer(&mdev->request_timer);
	mdev->resync_timer.function = resync_timer_fn;
	mdev->resync_timer.data = (unsigned long) mdev;
	mdev->md_sync_timer.function = md_sync_timer_fn;
	mdev->md_sync_timer.data = (unsigned long) mdev;
	mdev->start_resync_timer.function = start_resync_timer_fn;
	mdev->start_resync_timer.data = (unsigned long) mdev;
	mdev->request_timer.function = request_timer_fn;
	mdev->request_timer.data = (unsigned long) mdev;

	init_waitqueue_head(&mdev->misc_wait);
	init_waitqueue_head(&mdev->state_wait);
	init_waitqueue_head(&mdev->ee_wait);
	init_waitqueue_head(&mdev->al_wait);
	init_waitqueue_head(&mdev->seq_wait);

	mdev->resync_wenr = LC_FREE;
	mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}

void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
	int i;
	if (mdev->tconn->receiver.t_state != NONE)
		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				mdev->tconn->receiver.t_state);

	mdev->al_writ_cnt  =
	mdev->bm_writ_cnt  =
	mdev->read_cnt     =
	mdev->recv_cnt     =
	mdev->send_cnt     =
	mdev->writ_cnt     =
	mdev->p_size       =
	mdev->rs_start     =
	mdev->rs_total     =
	mdev->rs_failed    = 0;
	mdev->rs_last_events = 0;
	mdev->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		mdev->rs_mark_left[i] = 0;
		mdev->rs_mark_time[i] = 0;
	}
	D_ASSERT(mdev->tconn->net_conf == NULL);

	drbd_set_my_capacity(mdev, 0);
	if (mdev->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(mdev, 0, 1);
		drbd_bm_cleanup(mdev);
	}

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	clear_bit(AL_SUSPENDED, &mdev->flags);

	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));
	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->net_ee));
	D_ASSERT(list_empty(&mdev->resync_reads));
	D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));

	drbd_set_defaults(mdev);
}


static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
	if (drbd_md_io_page_pool)
		mempool_destroy(drbd_md_io_page_pool);
	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;

	return;
}

static int drbd_create_mempools(void)
{
	struct page *page;
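	/* pool size: enough pages for one maximally sized bio
	 * in flight per configured minor */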
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}

static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};

static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
{
	int rr;

	rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
	if (rr)
		dev_err(DEV, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
	if (rr)
		dev_err(DEV, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
	if (rr)
		dev_err(DEV, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
	if (rr)
		dev_err(DEV, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (rr)
		dev_err(DEV, "%d EEs in net list found!\n", rr);
}

/* caution. no locking. */
void drbd_minor_destroy(struct kref *kref)
{
	struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
	struct drbd_tconn *tconn = mdev->tconn;

	del_timer_sync(&mdev->request_timer);

	/* paranoia asserts */
	D_ASSERT(mdev->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (mdev->this_bdev)
		bdput(mdev->this_bdev);

	drbd_free_bc(mdev->ldev);
	mdev->ldev = NULL;

	drbd_release_all_peer_reqs(mdev);

	lc_destroy(mdev->act_log);
	lc_destroy(mdev->resync);

	kfree(mdev->p_uuid);
	/* mdev->p_uuid = NULL; */

	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	kfree(mdev->rs_plan_s);
	kfree(mdev);

	kref_put(&tconn->kref, &conn_destroy);
}
2195
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002196/* One global retry thread, if we need to push back some bio and have it
2197 * reinserted through our make request function.
2198 */
2199static struct retry_worker {
2200 struct workqueue_struct *wq;
2201 struct work_struct worker;
2202
2203 spinlock_t lock;
2204 struct list_head writes;
2205} retry;
2206
2207static void do_retry(struct work_struct *ws)
2208{
2209 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2210 LIST_HEAD(writes);
2211 struct drbd_request *req, *tmp;
2212
2213 spin_lock_irq(&retry->lock);
2214 list_splice_init(&retry->writes, &writes);
2215 spin_unlock_irq(&retry->lock);
2216
2217 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2218 struct drbd_conf *mdev = req->w.mdev;
2219 struct bio *bio = req->master_bio;
2220 unsigned long start_time = req->start_time;
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002221 bool expected;
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002222
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002223 expected =
2224 expect(atomic_read(&req->completion_ref) == 0) &&
2225 expect(req->rq_state & RQ_POSTPONED) &&
2226 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2227 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2228
2229 if (!expected)
2230 dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
2231 req, atomic_read(&req->completion_ref),
2232 req->rq_state);
2233
2234 /* We still need to put one kref associated with the
2235 * "completion_ref" going zero in the code path that queued it
2236 * here. The request object may still be referenced by a
2237 * frozen local req->private_bio, in case we force-detached.
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002238 */
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002239 kref_put(&req->kref, drbd_req_destroy);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002240
2241 /* A single suspended or otherwise blocking device may stall
2242 * all others as well. Fortunately, this code path is to
2243 * recover from a situation that "should not happen":
2244 * concurrent writes in multi-primary setup.
2245 * In a "normal" lifecycle, this workqueue is supposed to be
2246 * destroyed without ever doing anything.
2247 * If it turns out to be an issue anyways, we can do per
2248 * resource (replication group) or per device (minor) retry
2249 * workqueues instead.
2250 */
2251
2252 /* We are not just doing generic_make_request(),
2253 * as we want to keep the start_time information. */
Lars Ellenberg5df69ec2012-01-24 16:49:58 +01002254 inc_ap_bio(mdev);
2255 __drbd_make_request(mdev, bio, start_time);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002256 }
2257}

void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->w.mdev);

	queue_work(retry.wq, &retry.worker);
}


static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn, *tmp;

	unregister_reboot_notifier(&drbd_notifier);

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we got stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&minors, mdev, i) {
		idr_remove(&minors, mdev_to_minor(mdev));
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		del_gendisk(mdev->vdisk);
		/* synchronize_rcu(); No other threads running at this point */
		kref_put(&mdev->kref, &drbd_minor_destroy);
	}

	/* not _rcu since, no other updater anymore. Genl already unregistered */
	list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
		list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */
		/* synchronize_rcu(); */
		kref_put(&tconn->kref, &conn_destroy);
	}

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&minors);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}

/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:	User data
 * @bdi_bits:		Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_conf *mdev = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

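	/* congestion_reason legend: 'd' io frozen by drbd, 'c' usermode
	 * helper callback pending, 'b' backing device, 'n' network,
	 * 'a' both backing device and network */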
	if (!may_inc_ap_bio(mdev)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
		r |= (1 << BDI_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish.
		 */
		if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
			r |= (1 << BDI_sync_congested);
		else
			put_ldev(mdev);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(mdev);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	mdev->congestion_reason = reason;
	return r;
}

static void drbd_init_workqueue(struct drbd_work_queue* wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}

Philipp Reisner0ace9df2011-04-24 10:53:19 +02002384struct drbd_tconn *conn_get_by_name(const char *name)
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002385{
2386 struct drbd_tconn *tconn;
2387
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002388 if (!name || !name[0])
2389 return NULL;
2390
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002391 rcu_read_lock();
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002392 list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002393 if (!strcmp(tconn->name, name)) {
2394 kref_get(&tconn->kref);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002395 goto found;
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002396 }
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002397 }
2398 tconn = NULL;
2399found:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002400 rcu_read_unlock();
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002401 return tconn;
2402}

struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
				     void *peer_addr, int peer_addr_len)
{
	struct drbd_tconn *tconn;

	rcu_read_lock();
	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
		if (tconn->my_addr_len == my_addr_len &&
		    tconn->peer_addr_len == peer_addr_len &&
		    !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
		    !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
			kref_get(&tconn->kref);
			goto found;
		}
	}
	tconn = NULL;
found:
	rcu_read_unlock();
	return tconn;
}
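
/*
 * Usage sketch (illustrative; the resource name is hypothetical): both
 * conn_get_by_*() helpers return with an extra reference held, which the
 * caller must drop again:
 *
 *	struct drbd_tconn *tconn = conn_get_by_name("r0");
 *	if (tconn) {
 *		...
 *		kref_put(&tconn->kref, &conn_destroy);
 *	}
 */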

static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_tconn *tconn)
{
	drbd_free_sock(tconn);

	crypto_free_hash(tconn->csums_tfm);
	crypto_free_hash(tconn->verify_tfm);
	crypto_free_hash(tconn->cram_hmac_tfm);
	crypto_free_hash(tconn->integrity_tfm);
	crypto_free_hash(tconn->peer_integrity_tfm);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);

	tconn->csums_tfm = NULL;
	tconn->verify_tfm = NULL;
	tconn->cram_hmac_tfm = NULL;
	tconn->integrity_tfm = NULL;
	tconn->peer_integrity_tfm = NULL;
	tconn->int_dig_in = NULL;
	tconn->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
{
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
	/*
	retcode = ERR_NOMEM;
	drbd_msg_put_info("unable to allocate cpumask");
	*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		/* FIXME: Get rid of constant 32 here */
		err = bitmap_parse(res_opts->cpu_mask, 32,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	tconn->res_opts = *res_opts;
	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}
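
/*
 * Illustrative example: a cpu_mask of "3" (a hexadecimal mask, as
 * understood by bitmap_parse() above) would pin this resource's receiver,
 * asender and worker threads to CPUs 0 and 1 once they act on their
 * reset_cpu_mask flags.
 */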

/* caller must be under genl_lock() */
struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_tconn *tconn;

	tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
	if (!tconn)
		return NULL;

	tconn->name = kstrdup(name, GFP_KERNEL);
	if (!tconn->name)
		goto fail;

	if (drbd_alloc_socket(&tconn->data))
		goto fail;
	if (drbd_alloc_socket(&tconn->meta))
		goto fail;

	if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
		goto fail;

	if (set_resource_options(tconn, res_opts))
		goto fail;

	tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!tconn->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&tconn->transfer_log);

	INIT_LIST_HEAD(&tconn->current_epoch->list);
	tconn->epochs = 1;
	spin_lock_init(&tconn->epoch_lock);
	tconn->write_ordering = WO_bdev_flush;

	tconn->send.seen_any_write_yet = false;
	tconn->send.current_epoch_nr = 0;
	tconn->send.current_epoch_writes = 0;

	tconn->cstate = C_STANDALONE;
	mutex_init(&tconn->cstate_mutex);
	spin_lock_init(&tconn->req_lock);
	mutex_init(&tconn->conf_update);
	init_waitqueue_head(&tconn->ping_wait);
	idr_init(&tconn->volumes);

	drbd_init_workqueue(&tconn->sender_work);
	mutex_init(&tconn->data.mutex);
	mutex_init(&tconn->meta.mutex);

	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");

	kref_init(&tconn->kref);
	list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);

	return tconn;

fail:
	kfree(tconn->current_epoch);
	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn);

	return NULL;
}

void conn_destroy(struct kref *kref)
{
	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);

	if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
	kfree(tconn->current_epoch);

	idr_destroy(&tconn->volumes);

	free_cpumask_var(tconn->cpu_mask);
	drbd_free_socket(&tconn->meta);
	drbd_free_socket(&tconn->data);
	kfree(tconn->name);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	kfree(tconn);
}

enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
{
	struct drbd_conf *mdev;
	struct gendisk *disk;
	struct request_queue *q;
	int vnr_got = vnr;
	int minor_got = minor;
	enum drbd_ret_code err = ERR_NOMEM;

	mdev = minor_to_mdev(minor);
	if (mdev)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
	if (!mdev)
		return ERR_NOMEM;

	kref_get(&tconn->kref);
	mdev->tconn = tconn;

	mdev->minor = minor;
	mdev->vnr = vnr;

	drbd_init_set_defaults(mdev);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	mdev->rq_queue = q;
	q->queuedata = mdev;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	mdev->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = mdev;

	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	mdev->this_bdev->bd_contains = mdev->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = mdev;

	blk_queue_make_request(q, drbd_make_request);
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	/* Setting the max_hw_sectors to an odd value of 8 KiB here.
	 * This triggers a max_bio_size message upon first attach or connect. */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */

	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	mdev->read_requests = RB_ROOT;
	mdev->write_requests = RB_ROOT;

	if (!idr_pre_get(&minors, GFP_KERNEL))
		goto out_no_minor_idr;
	if (idr_get_new_above(&minors, mdev, minor, &minor_got))
		goto out_no_minor_idr;
	if (minor_got != minor) {
		err = ERR_MINOR_EXISTS;
		drbd_msg_put_info("requested minor exists already");
		goto out_idr_remove_minor;
	}

	if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
		goto out_idr_remove_minor;
	if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
		goto out_idr_remove_minor;
	if (vnr_got != vnr) {
		err = ERR_INVALID_REQUEST;
		drbd_msg_put_info("requested volume exists already");
		goto out_idr_remove_vol;
	}
	add_disk(disk);
	kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */

	/* inherit the connection state */
	mdev->state.conn = tconn->cstate;
	if (mdev->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(mdev);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&tconn->volumes, vnr_got);
out_idr_remove_minor:
	idr_remove(&minors, minor_got);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kfree(mdev);
	kref_put(&tconn->kref, &conn_destroy);
	return err;
}
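
/*
 * Illustrative note on the two-step idr pattern above: idr_pre_get() only
 * preallocates memory; idr_get_new_above() then hands out the lowest free
 * id >= the requested one via minor_got / vnr_got.  Receiving a different
 * id than requested therefore means the requested slot was already in use.
 */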

int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&minors);

	err = drbd_create_mempools();
	if (err)
		goto fail;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_tconns);

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
	       DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}

void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_tconn *tconn)
{
	if (tconn->data.socket) {
		mutex_lock(&tconn->data.mutex);
		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
		mutex_unlock(&tconn->data.mutex);
	}
	if (tconn->meta.socket) {
		mutex_lock(&tconn->meta.mutex);
		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
		mutex_unlock(&tconn->meta.mutex);
	}
}

/* meta data management */

struct meta_data_on_disk {
	u64 la_size;		/* last agreed size. */
	u64 uuid[UI_SIZE];	/* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;		/* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;		/* offset to this block */
	u32 al_nr_extents;	/* important for restoring the AL */
		/* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;		/* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;	/* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;	/* last peer max_bio_size */
	u32 reserved_u32[3];

} __packed;
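
/*
 * Illustrative note: every field of struct meta_data_on_disk is stored
 * big-endian on disk (see the cpu_to_be32()/cpu_to_be64() conversions in
 * drbd_md_sync() below), keeping the on-disk format independent of host
 * byte order.
 */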

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	drbd_md_put_buffer(mdev);
out:
	put_ldev(mdev);
}
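
/*
 * Illustrative note: the superblock written above is a single 512-byte
 * sector (hence the memset(buffer, 0, 512)), and drbd_md_sync() is a no-op
 * unless drbd_md_mark_dirty() has set MD_DIRTY since the last sync.
 */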

/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
 * something goes wrong.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		goto out;

	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
		/* btw: that's Activity Log clean, not "all" clean. */
		dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
			be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
			be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
			be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED) {
		int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
		mdev->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

 err:
	drbd_md_put_buffer(mdev);
 out:
	put_ldev(mdev);

	return rv;
}

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY and starts a
 * timer that ensures drbd_md_sync() gets called within five seconds.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif

void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(mdev);
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
	__drbd_uuid_set(mdev, idx, val);
	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
}

void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
	}
	__drbd_uuid_set(mdev, idx, val);
	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
}
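
/*
 * Illustrative note: the least significant bit of the current UUID encodes
 * the role at the time the UUID was written (set while R_PRIMARY, cleared
 * otherwise); that is what __drbd_uuid_set() masks in or out above.
 */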

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid;

	get_random_bytes(&val, sizeof(u64));

	spin_lock_irq(&mdev->ldev->md.uuid_lock);
	bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
	__drbd_uuid_set(mdev, UI_CURRENT, val);
	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

	drbd_print_uuids(mdev, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	unsigned long flags;
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);

	drbd_md_mark_dirty(mdev);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_conf *mdev = w->mdev;
	int rv = -EIO;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	if (get_ldev(mdev)) {
		drbd_bm_lock(mdev, work->why, work->flags);
		rv = work->io_fn(mdev);
		drbd_bm_unlock(mdev);
		put_ldev(mdev);
	}

	clear_bit_unlock(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		drbd_free_bc(mdev->ldev);
		mdev->ldev = NULL;);

	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}
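
/*
 * Illustrative note: test_and_set_bit(GO_DISKLESS, ...) above guarantees
 * the go_diskless work is queued at most once; drbd_ldev_destroy() clears
 * the bit again once the device has reached D_DISKLESS.
 */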

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag)
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(current == mdev->tconn->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;
	mdev->bm_io_work.flags = flags;

	spin_lock_irq(&mdev->tconn->req_lock);
	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);
}

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 * @flags:	Bitmap locking flags (enum bm_flag)
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
		   char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current != mdev->tconn->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(mdev);

	return rv;
}
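
/*
 * Usage sketch (illustrative; the argument combination is an assumption,
 * not taken from an actual call site):
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *			    "example set_n_write", BM_LOCKED_SET_ALLOWED);
 *
 * This runs the io_fn with the bitmap locked; because
 * BM_LOCKED_SET_ALLOWED is passed, application IO is not suspended around
 * the operation.
 */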

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	/* must not double-queue! */
	if (list_empty(&mdev->md_sync_work.list))
		drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_conf *mdev = w->mdev;

	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 0;
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]		= "Data",
		[P_DATA_REPLY]		= "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]		= "Barrier",
		[P_BITMAP]		= "ReportBitMap",
		[P_BECOME_SYNC_TARGET]	= "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]	= "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]	= "RSDataRequest",
		[P_SYNC_PARAM]		= "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]		= "ReportProtocol",
		[P_UUIDS]		= "ReportUUIDs",
		[P_SIZES]		= "ReportSizes",
		[P_STATE]		= "ReportState",
		[P_SYNC_UUID]		= "ReportSyncUUID",
		[P_AUTH_CHALLENGE]	= "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]		= "PingAck",
		[P_RECV_ACK]		= "RecvAck",
		[P_WRITE_ACK]		= "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_SUPERSEDED]		= "Superseded",
		[P_NEG_ACK]		= "NegAck",
		[P_NEG_DREPLY]		= "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]		= "BarrierAck",
		[P_STATE_CHG_REQ]	= "StateChgRequest",
		[P_STATE_CHG_REPLY]	= "StateChgReply",
		[P_OV_REQUEST]		= "OVRequest",
		[P_OV_REPLY]		= "OVReply",
		[P_OV_RESULT]		= "OVResult",
		[P_CSUM_RS_REQUEST]	= "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]	= "CBitmap",
		[P_DELAY_PROBE]		= "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RETRY_WRITE]		= "RetryWrite",
		[P_RS_CANCEL]		= "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		/* NOTE: duplicate designated initializer for P_RETRY_WRITE;
		 * this later entry overrides the "RetryWrite" string above. */
		[P_RETRY_WRITE]		= "retry_write",
		[P_PROTOCOL_UPDATE]	= "protocol_update",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}

/**
 * drbd_wait_misc - wait for a request to make progress
 * @mdev:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	/* nc->timeout is configured in tenths of a second;
	 * wait up to ko_count such timeouts */
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up mdev->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&mdev->tconn->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&mdev->misc_wait, &wait);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (!timeout || mdev->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator. Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}
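
/*
 * Illustrative note: the recurrence above is the classic linear
 * congruential generator
 *
 *	state' = (state * 39916801 + 479001701) mod 2^BITS_PER_LONG
 *
 * reseeded from get_random_bytes() every FAULT_RANDOM_REFRESH (10000)
 * draws; swahw32() swaps the 16-bit half-words so the weak low-order bits
 * of the LCG are not returned unchanged.
 */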

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
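
/*
 * Illustrative note: with module parameters fault_rate=1 and fault_devs=0,
 * roughly 1% of eligible IO requests on all minors would get a fault of
 * the given type injected; a non-zero fault_devs is treated as a bitmask
 * of minor numbers to restrict injection to.
 */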
#endif

const char *drbd_buildtag(void)
{
	/* When DRBD is built from external sources, this holds a reference
	   to the git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			/* the leading NUL byte of "\0uilt-in" becomes 'b',
			 * turning the array into the string "built-in" */
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);