/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
int proc_details; /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

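/* Rate-limit state for DRBD's log output: allow bursts of up to
 * 5 messages within each 5*HZ window. */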
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

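/* Allocate a single-page bio for meta-data I/O.  Uses the dedicated
 * drbd_md_io_bio_set once it exists; falls back to the global bio
 * allocator while the module is still initializing. */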
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_WRITE))
				continue;
			if (!(s & RQ_NET_MASK))
				continue;
			if (s & RQ_NET_DONE)
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			if (!(s & RQ_WRITE))
				continue;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch)
			break;
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}

/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}

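/* Common entry point of all DRBD kernel threads.  Runs thi->function()
 * and, if drbd_thread_start() flagged the thread RESTARTING in the
 * meantime, loops back instead of exiting. */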
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 resource->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}

static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name = name;
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}

void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();

	return minor;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif

/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

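/* Three on-the-wire header formats exist: struct p_header80 (16-bit
 * length) for agreed protocol versions before 95, struct p_header95
 * (32-bit length, same size as p_header80) for large packets up to
 * protocol 99, and struct p_header100, which additionally carries the
 * volume number, from protocol 100 on.  prepare_header() below picks
 * the right one for the negotiated version and packet size. */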
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}

static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

Philipp Reisnerb411b362009-09-25 16:07:19 -0700624{
Andreas Gruenbachera7eb7bd2011-04-29 13:19:58 +0200625 void *p;
626
Andreas Gruenbacherdba58582011-03-29 16:55:40 +0200627 mutex_lock(&sock->mutex);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +0200628 p = __conn_prepare_command(connection, sock);
Andreas Gruenbachera7eb7bd2011-04-29 13:19:58 +0200629 if (!p)
Andreas Gruenbacherdba58582011-03-29 16:55:40 +0200630 mutex_unlock(&sock->mutex);
Andreas Gruenbachera7eb7bd2011-04-29 13:19:58 +0200631
632 return p;
Andreas Gruenbacherdba58582011-03-29 16:55:40 +0200633}
634
Andreas Gruenbacher69a22772011-08-09 00:47:13 +0200635void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
Andreas Gruenbacherdba58582011-03-29 16:55:40 +0200636{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +0200637 return conn_prepare_command(peer_device->connection, sock);
Andreas Gruenbacherdba58582011-03-29 16:55:40 +0200638}
639
static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		drbd_tcp_nodelay(sock->socket);

	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

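/* P_PING and P_PING_ACK travel over the meta-data socket; they serve as
 * the keep-alive mechanism used to detect an unresponsive peer. */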
int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}

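/* Bits in p_uuids->uuid[UI_FLAGS], as assembled below and by the
 * callers: 1 = peer should discard my data, 2 = crashed primary,
 * 4 = disk is inconsistent, 8 = skip initial sync
 * (see drbd_send_uuids_skip_initial_sync()). */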
static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
			  text,
			  (unsigned long long)uuid[UI_CURRENT],
			  (unsigned long long)uuid[UI_BITMAP],
			  (unsigned long long)uuid[UI_HISTORY_START],
			  (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
			  text,
			  (unsigned long long)device->ed_uuid);
	}
}

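/* Roll a new sync UUID (offset the current bitmap UUID, or generate a
 * random one if none exists yet), persist it to meta data, and announce
 * it to the peer as P_SYNC_UUID. */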
void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;

	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		D_ASSERT(device, device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

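	/* Clamp the advertised max_bio_size to what the peer's protocol
	 * version can actually represent on the wire. */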
	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}

Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001004int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001005{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001006 struct drbd_socket *sock;
1007 struct p_req_state *p;
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001008
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001009 sock = &peer_device->connection->data;
1010 p = drbd_prepare_command(peer_device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001011 if (!p)
1012 return -EIO;
1013 p->mask = cpu_to_be32(mask.i);
1014 p->val = cpu_to_be32(val.i);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001015 return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001016}
1017
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001018int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001019{
1020 enum drbd_packet cmd;
1021 struct drbd_socket *sock;
1022 struct p_req_state *p;
1023
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001024 cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1025 sock = &connection->data;
1026 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001027 if (!p)
1028 return -EIO;
1029 p->mask = cpu_to_be32(mask.i);
1030 p->val = cpu_to_be32(val.i);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001031 return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001032}
1033
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001034void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001035{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001036 struct drbd_socket *sock;
1037 struct p_req_state_reply *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001038
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001039 sock = &peer_device->connection->meta;
1040 p = drbd_prepare_command(peer_device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001041 if (p) {
1042 p->retcode = cpu_to_be32(retcode);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001043 drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001044 }
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001045}
1046
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001047void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001048{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001049 struct drbd_socket *sock;
1050 struct p_req_state_reply *p;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001051 enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001052
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001053 sock = &connection->meta;
1054 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001055 if (p) {
1056 p->retcode = cpu_to_be32(retcode);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001057 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001058 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001059}
1060
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001061static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001062{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001063 BUG_ON(code & ~0xf);
1064 p->encoding = (p->encoding & ~0xf) | code;
1065}
Philipp Reisnerb411b362009-09-25 16:07:19 -07001066
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001067static void dcbp_set_start(struct p_compressed_bm *p, int set)
1068{
1069 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1070}
Philipp Reisnerb411b362009-09-25 16:07:19 -07001071
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001072static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1073{
1074 BUG_ON(n & ~0x7);
1075 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001076}
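
/* For orientation: the three helpers above pack the whole compressed-bitmap
 * header into the single p->encoding byte -- the bitmap code in the low
 * nibble, the pad-bit count in bits 4-6, and the "first run is set" flag in
 * bit 7.  A minimal decoding sketch of that layout (helper names are
 * illustrative; the real counterparts live on the receiving side):
 */
static inline enum drbd_bitmap_code dcbp_get_code_sketch(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static inline int dcbp_get_start_sketch(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static inline int dcbp_get_pad_bits_sketch(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}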
1077
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001078static int fill_bitmap_rle_bits(struct drbd_device *device,
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001079 struct p_compressed_bm *p,
1080 unsigned int size,
1081 struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001082{
1083 struct bitstream bs;
1084 unsigned long plain_bits;
1085 unsigned long tmp;
1086 unsigned long rl;
1087 unsigned len;
1088 unsigned toggle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001089 int bits, use_rle;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001090
1091 /* may we use this feature? */
Philipp Reisner44ed1672011-04-19 17:10:19 +02001092 rcu_read_lock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001093 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001094 rcu_read_unlock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001095 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
Philipp Reisner44ed1672011-04-19 17:10:19 +02001096 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001097
1098 if (c->bit_offset >= c->bm_bits)
1099 return 0; /* nothing to do. */
1100
1101 /* use at most this many bytes */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001102 bitstream_init(&bs, p->code, size, 0);
1103 memset(p->code, 0, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001104 /* plain bits covered in this code string */
1105 plain_bits = 0;
1106
1107 /* p->encoding & 0x80 stores whether the first run length is set.
1108 * bit offset is implicit.
1109 * start with toggle == 2 to be able to tell the first iteration */
1110 toggle = 2;
1111
1112 /* see how many plain bits we can stuff into one packet
1113 * using RLE and VLI. */
1114 do {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001115 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1116 : _drbd_bm_find_next(device, c->bit_offset);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001117 if (tmp == -1UL)
1118 tmp = c->bm_bits;
1119 rl = tmp - c->bit_offset;
1120
1121 if (toggle == 2) { /* first iteration */
1122 if (rl == 0) {
1123 /* the first checked bit was set,
1124 * store start value, */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001125 dcbp_set_start(p, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001126 /* but skip encoding of zero run length */
1127 toggle = !toggle;
1128 continue;
1129 }
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001130 dcbp_set_start(p, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001131 }
1132
1133 /* paranoia: catch zero runlength.
1134 * can only happen if bitmap is modified while we scan it. */
1135 if (rl == 0) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001136 drbd_err(device, "unexpected zero runlength while encoding bitmap "
Philipp Reisnerb411b362009-09-25 16:07:19 -07001137 "t:%u bo:%lu\n", toggle, c->bit_offset);
1138 return -1;
1139 }
1140
1141 bits = vli_encode_bits(&bs, rl);
1142 if (bits == -ENOBUFS) /* buffer full */
1143 break;
1144 if (bits <= 0) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001145 drbd_err(device, "error while encoding bitmap: %d\n", bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001146 return 0;
1147 }
1148
1149 toggle = !toggle;
1150 plain_bits += rl;
1151 c->bit_offset = tmp;
1152 } while (c->bit_offset < c->bm_bits);
1153
1154 len = bs.cur.b - p->code + !!bs.cur.bit;
1155
1156 if (plain_bits < (len << 3)) {
1157 /* incompressible with this method.
1158 * we need to rewind both word and bit position. */
1159 c->bit_offset -= plain_bits;
1160 bm_xfer_ctx_bit_to_word_offset(c);
1161 c->bit_offset = c->word_offset * BITS_PER_LONG;
1162 return 0;
1163 }
1164
1165 /* RLE + VLI was able to compress it just fine.
1166 * update c->word_offset. */
1167 bm_xfer_ctx_bit_to_word_offset(c);
1168
1169 /* store pad_bits */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001170 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001171
1172 return len;
1173}
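
/* Worked example for the compressibility cut-off above (made-up numbers):
 * say the scan covered plain_bits = 8192 bits and the VLI code stream needed
 * len = 12 bytes.  Then len << 3 = 96 encoded bits stand in for 8192 plain
 * bits, so the RLE packet clearly pays off and we return len.  Had the
 * bitmap toggled on every single bit, each one-bit run would cost at least
 * one code, len << 3 would reach or exceed plain_bits, and the rewind above
 * would send us down the plain P_BITMAP path instead. */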
1174
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001175/**
1176 * send_bitmap_rle_or_plain
1177 *
1178 * Return 0 when done, 1 when another iteration is needed, and a negative error
1179 * code upon failure.
1180 */
1181static int
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001182send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001183{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001184 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1185 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001186 struct p_compressed_bm *p = sock->sbuf + header_size;
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001187 int len, err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001188
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001189 len = fill_bitmap_rle_bits(device, p,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001190 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001191 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001192 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001193
1194 if (len) {
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001195 dcbp_set_code(p, RLE_VLI_Bits);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001196 err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001197 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1198 NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001199 c->packets[0]++;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001200 c->bytes[0] += header_size + sizeof(*p) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001201
1202 if (c->bit_offset >= c->bm_bits)
1203 len = 0; /* DONE */
1204 } else {
1205 /* was not compressible.
1206 * send a buffer full of plain text bits instead. */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001207 unsigned int data_size;
1208 unsigned long num_words;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001209 unsigned long *p = sock->sbuf + header_size;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001210
1211 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001212 num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001213 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001214 len = num_words * sizeof(*p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001215 if (len)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001216 drbd_bm_get_lel(device, c->word_offset, num_words, p);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001217 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001218 c->word_offset += num_words;
1219 c->bit_offset = c->word_offset * BITS_PER_LONG;
1220
1221 c->packets[1]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001222 c->bytes[1] += header_size + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001223
1224 if (c->bit_offset > c->bm_bits)
1225 c->bit_offset = c->bm_bits;
1226 }
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001227 if (!err) {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001228 if (len == 0) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001229 INFO_bm_xfer_stats(device, "send", c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001230 return 0;
1231 } else
1232 return 1;
1233 }
1234 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001235}
1236
1237/* See the comment at receive_bitmap() */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001238static int _drbd_send_bitmap(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001239{
1240 struct bm_xfer_ctx c;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001241 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001242
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001243 if (!expect(device->bitmap))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001244 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001245
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001246 if (get_ldev(device)) {
1247 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001248 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001249 drbd_bm_set_all(device);
1250 if (drbd_bm_write(device)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001251 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1252 * but otherwise process as per normal - need to tell other
1253 * side that a full resync is required! */
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001254 drbd_err(device, "Failed to write bitmap to disk!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001255 } else {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001256 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1257 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001258 }
1259 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001260 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001261 }
1262
1263 c = (struct bm_xfer_ctx) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001264 .bm_bits = drbd_bm_bits(device),
1265 .bm_words = drbd_bm_words(device),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001266 };
1267
1268 do {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001269 err = send_bitmap_rle_or_plain(device, &c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001270 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001271
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001272 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001273}
1274
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001275int drbd_send_bitmap(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001276{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001277 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001278 int err = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001279
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001280 mutex_lock(&sock->mutex);
1281 if (sock->socket)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001282 err = !_drbd_send_bitmap(device);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001283 mutex_unlock(&sock->mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001284 return err;
1285}
1286
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001287void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001288{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001289 struct drbd_socket *sock;
1290 struct p_barrier_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001291
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001292 if (connection->cstate < C_WF_REPORT_PARAMS)
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001293 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001294
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001295 sock = &connection->meta;
1296 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001297 if (!p)
1298 return;
1299 p->barrier = barrier_nr;
1300 p->set_size = cpu_to_be32(set_size);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001301 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001302}
1303
1304/**
1305 * _drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001306 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001307 * @cmd: Packet command code.
1308 * @sector: sector, needs to be in big endian byte order
1309 * @blksize: size in bytes, needs to be in big endian byte order
1310 * @block_id: Id, big endian byte order
1311 */
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001312static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001313 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001314{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001315 struct drbd_socket *sock;
1316 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001317
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001318 if (peer_device->device->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001319 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001320
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001321 sock = &peer_device->connection->meta;
1322 p = drbd_prepare_command(peer_device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001323 if (!p)
1324 return -EIO;
1325 p->sector = sector;
1326 p->block_id = block_id;
1327 p->blksize = blksize;
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001328 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1329 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001330}
1331
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001332/* dp->sector and dp->block_id already/still in network byte order,
1333 * data_size is payload size according to dp->head,
1334 * and may need to be corrected for digest size. */
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001335void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001336 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001337{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001338 if (peer_device->connection->peer_integrity_tfm)
1339 data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
1340 _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001341 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001342}
1343
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001344void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001345 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001346{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001347 _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001348}
1349
1350/**
1351 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001352 * @device: DRBD device
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001353 * @cmd: packet command code
1354 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001355 */
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001356int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001357 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001358{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001359 return _drbd_send_ack(peer_device, cmd,
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001360 cpu_to_be64(peer_req->i.sector),
1361 cpu_to_be32(peer_req->i.size),
1362 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001363}
1364
1365/* This function misuses the block_id field to signal if the blocks
1366 * are in sync or not. */
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001367int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001368 sector_t sector, int blksize, u64 block_id)
1369{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001370 return _drbd_send_ack(peer_device, cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001371 cpu_to_be64(sector),
1372 cpu_to_be32(blksize),
1373 cpu_to_be64(block_id));
1374}
1375
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001376int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001377 sector_t sector, int size, u64 block_id)
1378{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001379 struct drbd_socket *sock;
1380 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001381
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001382 sock = &peer_device->connection->data;
1383 p = drbd_prepare_command(peer_device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001384 if (!p)
1385 return -EIO;
1386 p->sector = cpu_to_be64(sector);
1387 p->block_id = block_id;
1388 p->blksize = cpu_to_be32(size);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001389 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001390}
1391
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001392int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001393 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001394{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001395 struct drbd_socket *sock;
1396 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001397
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001398 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001399
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001400 sock = &peer_device->connection->data;
1401 p = drbd_prepare_command(peer_device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001402 if (!p)
1403 return -EIO;
1404 p->sector = cpu_to_be64(sector);
1405 p->block_id = ID_SYNCER /* unused */;
1406 p->blksize = cpu_to_be32(size);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001407 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001408}
1409
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001410int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001411{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001412 struct drbd_socket *sock;
1413 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001414
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001415 sock = &peer_device->connection->data;
1416 p = drbd_prepare_command(peer_device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001417 if (!p)
1418 return -EIO;
1419 p->sector = cpu_to_be64(sector);
1420 p->block_id = ID_SYNCER /* unused */;
1421 p->blksize = cpu_to_be32(size);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001422 return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001423}
1424
1425/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001426 * returns false if we should retry,
1427 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001428 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001429static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001430{
1431 int drop_it;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001432 /* long elapsed = (long)(jiffies - device->last_received); */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001433
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001434 drop_it = connection->meta.socket == sock
1435 || !connection->asender.task
1436 || get_t_state(&connection->asender) != RUNNING
1437 || connection->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001438
1439 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001440 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001441
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001442 drop_it = !--connection->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001443 if (!drop_it) {
Andreas Gruenbacher1ec861e2011-07-06 11:01:44 +02001444 drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001445 current->comm, current->pid, connection->ko_count);
1446 request_ping(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001447 }
1448
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001449 return drop_it; /* && (device->state == R_PRIMARY) */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001450}
1451
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001452static void drbd_update_congested(struct drbd_connection *connection)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001453{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001454 struct sock *sk = connection->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001455 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001456 set_bit(NET_CONGESTED, &connection->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001457}
1458
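/* Numeric sketch of the 4/5 threshold above (illustrative figures): with
 * sk_sndbuf = 131072 bytes (128 KiB), NET_CONGESTED is set once more than
 * 131072 * 4 / 5 = 104857 bytes sit queued in the socket, not yet sent. */
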
Philipp Reisnerb411b362009-09-25 16:07:19 -07001459/* The idea of sendpage seems to be to put some kind of reference
1460 * to the page into the skb, and to hand it over to the NIC. In
1461 * this process get_page() gets called.
1462 *
1463 * As soon as the page was really sent over the network put_page()
1464 * gets called by some part of the network layer. [ NIC driver? ]
1465 *
1466 * [ get_page() / put_page() increment/decrement the count. If count
1467 * reaches 0 the page will be freed. ]
1468 *
1469 * This works nicely with pages from FSs.
1470 * But this means that in protocol A we might signal IO completion too early!
1471 *
1472 * In order not to corrupt data during a resync we must make sure
1473 * that we do not reuse our own buffer pages (EEs) too early, therefore
1474 * we have the net_ee list.
1475 *
1476 * XFS seems to have problems, still, it submits pages with page_count == 0!
1477 * As a workaround, we disable sendpage on pages
1478 * with page_count == 0 or PageSlab.
1479 */
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001480static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001481 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001482{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001483 struct socket *socket;
1484 void *addr;
1485 int err;
1486
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001487 socket = peer_device->connection->data.socket;
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001488 addr = kmap(page) + offset;
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001489 err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001490 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001491 if (!err)
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001492 peer_device->device->send_cnt += size >> 9;
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001493 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001494}
1495
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001496static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001497 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001498{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001499 struct socket *socket = peer_device->connection->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001500 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001501 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001502 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001503
1504 /* e.g. XFS meta- & log-data is in slab pages, which have a
1505 * page_count of 0 and/or have PageSlab() set.
1506 * we cannot use send_page for those, as that does get_page();
1507 * put_page(); and would cause either a VM_BUG directly, or
1508 * __page_cache_release a page that would actually still be referenced
1509 * by someone, leading to some obscure delayed Oops somewhere else. */
1510 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001511 return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001512
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001513 msg_flags |= MSG_NOSIGNAL;
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001514 drbd_update_congested(peer_device->connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001515 set_fs(KERNEL_DS);
1516 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001517 int sent;
1518
1519 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001520 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001521 if (sent == -EAGAIN) {
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001522 if (we_should_drop_the_connection(peer_device->connection, socket))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001523 break;
1524 continue;
1525 }
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001526 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001527 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001528 if (sent < 0)
1529 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001530 break;
1531 }
1532 len -= sent;
1533 offset += sent;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001534 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001535 set_fs(oldfs);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001536 clear_bit(NET_CONGESTED, &peer_device->connection->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001537
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001538 if (len == 0) {
1539 err = 0;
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001540 peer_device->device->send_cnt += size >> 9;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001541 }
1542 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001543}
1544
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001545static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001546{
Kent Overstreet79886132013-11-23 17:19:00 -08001547 struct bio_vec bvec;
1548 struct bvec_iter iter;
1549
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001550 /* hint all but last page with MSG_MORE */
Kent Overstreet79886132013-11-23 17:19:00 -08001551 bio_for_each_segment(bvec, bio, iter) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001552 int err;
1553
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001554 err = _drbd_no_send_page(peer_device, bvec.bv_page,
Kent Overstreet79886132013-11-23 17:19:00 -08001555 bvec.bv_offset, bvec.bv_len,
Kent Overstreet4550dd62013-08-07 14:26:21 -07001556 bio_iter_last(bvec, iter)
Kent Overstreet79886132013-11-23 17:19:00 -08001557 ? 0 : MSG_MORE);
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001558 if (err)
1559 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001560 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001561 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001562}
1563
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001564static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001565{
Kent Overstreet79886132013-11-23 17:19:00 -08001566 struct bio_vec bvec;
1567 struct bvec_iter iter;
1568
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001569 /* hint all but last page with MSG_MORE */
Kent Overstreet79886132013-11-23 17:19:00 -08001570 bio_for_each_segment(bvec, bio, iter) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001571 int err;
1572
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001573 err = _drbd_send_page(peer_device, bvec.bv_page,
Kent Overstreet79886132013-11-23 17:19:00 -08001574 bvec.bv_offset, bvec.bv_len,
Kent Overstreet4550dd62013-08-07 14:26:21 -07001575 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001576 if (err)
1577 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001578 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001579 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001580}
1581
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001582static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001583 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001584{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001585 struct page *page = peer_req->pages;
1586 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001587 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001588
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001589 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001590 page_chain_for_each(page) {
1591 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001592
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001593 err = _drbd_send_page(peer_device, page, 0, l,
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001594 page_chain_next(page) ? MSG_MORE : 0);
1595 if (err)
1596 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001597 len -= l;
1598 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001599 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001600}
1601
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001602static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001603{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001604 if (connection->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001605 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001606 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1607 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1608 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1609 else
Jens Axboe721a9602011-03-09 11:56:30 +01001610 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001611}
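
/* The peer has to undo this mapping when it rebuilds the bio.  A minimal
 * sketch of the inverse, assuming the same DP_* and REQ_* definitions (the
 * helper name is illustrative; the real counterpart lives in the receiver
 * code):
 */
static unsigned long wire_flags_to_bio_sketch(u32 dp_flags)
{
	return (dp_flags & DP_RW_SYNC ? REQ_SYNC : 0) |
	       (dp_flags & DP_FUA ? REQ_FUA : 0) |
	       (dp_flags & DP_FLUSH ? REQ_FLUSH : 0) |
	       (dp_flags & DP_DISCARD ? REQ_DISCARD : 0);
}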
1612
Lars Ellenberg2f632ae2014-04-28 18:43:24 +02001613/* Used to send write or TRIM aka REQ_DISCARD requests
1614 * R_PRIMARY -> Peer (P_DATA, P_TRIM)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001615 */
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001616int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001617{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001618 struct drbd_device *device = peer_device->device;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001619 struct drbd_socket *sock;
1620 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001621 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001622 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001623 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001624
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001625 sock = &peer_device->connection->data;
1626 p = drbd_prepare_command(peer_device, sock);
1627 dgs = peer_device->connection->integrity_tfm ?
1628 crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001629
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001630 if (!p)
1631 return -EIO;
1632 p->sector = cpu_to_be64(req->i.sector);
1633 p->block_id = (unsigned long)req;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001634 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001635 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001636 if (device->state.conn >= C_SYNC_SOURCE &&
1637 device->state.conn <= C_PAUSED_SYNC_T)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001638 dp_flags |= DP_MAY_SET_IN_SYNC;
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001639 if (peer_device->connection->agreed_pro_version >= 100) {
Philipp Reisner303d1442011-04-13 16:24:47 -07001640 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1641 dp_flags |= DP_SEND_RECEIVE_ACK;
1642 if (req->rq_state & RQ_EXP_WRITE_ACK)
1643 dp_flags |= DP_SEND_WRITE_ACK;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001644 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001645 p->dp_flags = cpu_to_be32(dp_flags);
Lars Ellenberg2f632ae2014-04-28 18:43:24 +02001646
1647 if (dp_flags & DP_DISCARD) {
1648 struct p_trim *t = (struct p_trim*)p;
1649 t->size = cpu_to_be32(req->i.size);
1650 err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
1651 goto out;
1652 }
1653
1654 /* our digest is still only over the payload.
1655 * TRIM does not carry any payload. */
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001656 if (dgs)
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001657 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
1658 err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001659 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001660 /* For protocol A, we have to memcpy the payload into
1661 * socket buffers, as we may complete right away
1662 * as soon as we handed it over to tcp, at which point the data
1663 * pages may become invalid.
1664 *
1665 * For data-integrity enabled, we copy it as well, so we can be
1666 * sure that even if the bio pages may still be modified, it
1667 * won't change the data on the wire, thus if the digest checks
1668 * out ok after sending on this side, but does not fit on the
1669 * receiving side, we sure have detected corruption elsewhere.
1670 */
Philipp Reisner303d1442011-04-13 16:24:47 -07001671 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001672 err = _drbd_send_bio(peer_device, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001673 else
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001674 err = _drbd_send_zc_bio(peer_device, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001675
1676 /* double check digest, sometimes buffers have been modified in flight. */
1677 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001678 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001679 * currently supported in kernel crypto. */
1680 unsigned char digest[64];
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001681 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001682 if (memcmp(p + 1, digest, dgs)) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001683 drbd_warn(device,
Lars Ellenberg470be442010-11-10 10:36:52 +01001684 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001685 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001686 }
1687 } /* else if (dgs > 64) {
1688 ... Be noisy about digest too large ...
1689 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001690 }
Lars Ellenberg2f632ae2014-04-28 18:43:24 +02001691out:
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001692 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001693
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001694 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001695}
1696
1697/* answer packet, used to send data back for read requests:
1698 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1699 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1700 */
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001701int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001702 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001703{
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001704 struct drbd_device *device = peer_device->device;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001705 struct drbd_socket *sock;
1706 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001707 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001708 int dgs;
1709
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001710 sock = &peer_device->connection->data;
1711 p = drbd_prepare_command(peer_device, sock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001712
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001713 dgs = peer_device->connection->integrity_tfm ?
1714 crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001715
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001716 if (!p)
1717 return -EIO;
1718 p->sector = cpu_to_be64(peer_req->i.sector);
1719 p->block_id = peer_req->block_id;
1720 p->seq_num = 0; /* unused */
Lars Ellenbergb17f33c2012-02-08 15:32:51 +01001721 p->dp_flags = 0;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001722 if (dgs)
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001723 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
1724 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001725 if (!err)
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001726 err = _drbd_send_zc_ee(peer_device, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001727 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001728
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001729 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001730}
1731
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001732int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001733{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001734 struct drbd_socket *sock;
1735 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001736
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001737 sock = &peer_device->connection->data;
1738 p = drbd_prepare_command(peer_device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001739 if (!p)
1740 return -EIO;
1741 p->sector = cpu_to_be64(req->i.sector);
1742 p->blksize = cpu_to_be32(req->i.size);
Andreas Gruenbacher69a22772011-08-09 00:47:13 +02001743 return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001744}
1745
Philipp Reisnerb411b362009-09-25 16:07:19 -07001746/*
1747 drbd_send distinguishes two cases:
1748
1749 Packets sent via the data socket "sock"
1750 and packets sent via the meta data socket "msock"
1751
1752                    sock                      msock
1753 -----------------+-------------------------+------------------------------
1754 timeout           conf.timeout / 2          conf.timeout / 2
1755 timeout action    send a ping via msock     Abort communication
1756                                             and close all sockets
1757*/
1758
1759/*
1760 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1761 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001762int drbd_send(struct drbd_connection *connection, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001763 void *buf, size_t size, unsigned msg_flags)
1764{
1765 struct kvec iov;
1766 struct msghdr msg;
1767 int rv, sent = 0;
1768
1769 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001770 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001771
1772 /* THINK if (signal_pending) return ... ? */
1773
1774 iov.iov_base = buf;
1775 iov.iov_len = size;
1776
1777 msg.msg_name = NULL;
1778 msg.msg_namelen = 0;
1779 msg.msg_control = NULL;
1780 msg.msg_controllen = 0;
1781 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1782
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001783 if (sock == connection->data.socket) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001784 rcu_read_lock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001785 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001786 rcu_read_unlock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001787 drbd_update_congested(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001788 }
1789 do {
1790 /* STRANGE
1791 * tcp_sendmsg does _not_ use its size parameter at all ?
1792 *
1793 * -EAGAIN on timeout, -EINTR on signal.
1794 */
1795/* THINK
1796 * do we need to block DRBD_SIG if sock == &meta.socket ??
1797 * otherwise wake_asender() might interrupt some send_*Ack !
1798 */
1799 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1800 if (rv == -EAGAIN) {
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001801 if (we_should_drop_the_connection(connection, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001802 break;
1803 else
1804 continue;
1805 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001806 if (rv == -EINTR) {
1807 flush_signals(current);
1808 rv = 0;
1809 }
1810 if (rv < 0)
1811 break;
1812 sent += rv;
1813 iov.iov_base += rv;
1814 iov.iov_len -= rv;
1815 } while (sent < size);
1816
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001817 if (sock == connection->data.socket)
1818 clear_bit(NET_CONGESTED, &connection->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001819
1820 if (rv <= 0) {
1821 if (rv != -EAGAIN) {
Andreas Gruenbacher1ec861e2011-07-06 11:01:44 +02001822 drbd_err(connection, "%s_sendmsg returned %d\n",
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001823 sock == connection->meta.socket ? "msock" : "sock",
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001824 rv);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001825 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001826 } else
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001827 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001828 }
1829
1830 return sent;
1831}
1832
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001833/**
1834 * drbd_send_all - Send an entire buffer
1835 *
1836 * Returns 0 upon success and a negative error value otherwise.
1837 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001838int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001839 size_t size, unsigned msg_flags)
1840{
1841 int err;
1842
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001843 err = drbd_send(connection, sock, buffer, size, msg_flags);
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001844 if (err < 0)
1845 return err;
1846 if (err != size)
1847 return -EIO;
1848 return 0;
1849}
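
/* Usage sketch (hypothetical caller): because partial progress is folded
 * into -EIO, any non-zero return from drbd_send_all() means the buffer did
 * not go out completely:
 *
 *	if (drbd_send_all(connection, sock, buf, len, 0))
 *		goto reconnect;
 */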
1850
Philipp Reisnerb411b362009-09-25 16:07:19 -07001851static int drbd_open(struct block_device *bdev, fmode_t mode)
1852{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001853 struct drbd_device *device = bdev->bd_disk->private_data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001854 unsigned long flags;
1855 int rv = 0;
1856
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001857 mutex_lock(&drbd_main_mutex);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001858 spin_lock_irqsave(&device->resource->req_lock, flags);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001859 /* to have a stable device->state.role
Philipp Reisnerb411b362009-09-25 16:07:19 -07001860 * and no race with updating open_cnt */
1861
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001862 if (device->state.role != R_PRIMARY) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001863 if (mode & FMODE_WRITE)
1864 rv = -EROFS;
1865 else if (!allow_oos)
1866 rv = -EMEDIUMTYPE;
1867 }
1868
1869 if (!rv)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001870 device->open_cnt++;
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001871 spin_unlock_irqrestore(&device->resource->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001872 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001873
1874 return rv;
1875}
1876
Al Virodb2a1442013-05-05 21:52:57 -04001877static void drbd_release(struct gendisk *gd, fmode_t mode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001878{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001879 struct drbd_device *device = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001880 mutex_lock(&drbd_main_mutex);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001881 device->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001882 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001883}
1884
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001885static void drbd_set_defaults(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001886{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001887 /* Beware! The actual layout differs
1888 * between big endian and little endian */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001889 device->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001890 { .role = R_SECONDARY,
1891 .peer = R_UNKNOWN,
1892 .conn = C_STANDALONE,
1893 .disk = D_DISKLESS,
1894 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001895 } };
1896}
1897
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001898void drbd_init_set_defaults(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001899{
1900 /* the memset(,0,) did most of this.
1901 * note: only assignments, no allocation in here */
1902
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001903 drbd_set_defaults(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001904
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001905 atomic_set(&device->ap_bio_cnt, 0);
1906 atomic_set(&device->ap_pending_cnt, 0);
1907 atomic_set(&device->rs_pending_cnt, 0);
1908 atomic_set(&device->unacked_cnt, 0);
1909 atomic_set(&device->local_cnt, 0);
1910 atomic_set(&device->pp_in_use_by_net, 0);
1911 atomic_set(&device->rs_sect_in, 0);
1912 atomic_set(&device->rs_sect_ev, 0);
1913 atomic_set(&device->ap_in_flight, 0);
1914 atomic_set(&device->md_io_in_use, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001915
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001916 mutex_init(&device->own_state_mutex);
1917 device->state_mutex = &device->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001918
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001919 spin_lock_init(&device->al_lock);
1920 spin_lock_init(&device->peer_seq_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001921
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001922 INIT_LIST_HEAD(&device->active_ee);
1923 INIT_LIST_HEAD(&device->sync_ee);
1924 INIT_LIST_HEAD(&device->done_ee);
1925 INIT_LIST_HEAD(&device->read_ee);
1926 INIT_LIST_HEAD(&device->net_ee);
1927 INIT_LIST_HEAD(&device->resync_reads);
1928 INIT_LIST_HEAD(&device->resync_work.list);
1929 INIT_LIST_HEAD(&device->unplug_work.list);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001930 INIT_LIST_HEAD(&device->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02001931
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001932 device->resync_work.cb = w_resync_timer;
1933 device->unplug_work.cb = w_send_write_hint;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001934 device->bm_io_work.w.cb = w_bitmap_io;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001935
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001936 init_timer(&device->resync_timer);
1937 init_timer(&device->md_sync_timer);
1938 init_timer(&device->start_resync_timer);
1939 init_timer(&device->request_timer);
1940 device->resync_timer.function = resync_timer_fn;
1941 device->resync_timer.data = (unsigned long) device;
1942 device->md_sync_timer.function = md_sync_timer_fn;
1943 device->md_sync_timer.data = (unsigned long) device;
1944 device->start_resync_timer.function = start_resync_timer_fn;
1945 device->start_resync_timer.data = (unsigned long) device;
1946 device->request_timer.function = request_timer_fn;
1947 device->request_timer.data = (unsigned long) device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001948
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001949 init_waitqueue_head(&device->misc_wait);
1950 init_waitqueue_head(&device->state_wait);
1951 init_waitqueue_head(&device->ee_wait);
1952 init_waitqueue_head(&device->al_wait);
1953 init_waitqueue_head(&device->seq_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001954
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001955 device->resync_wenr = LC_FREE;
1956 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1957 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001958}

void drbd_device_cleanup(struct drbd_device *device)
{
	int i;
	if (first_peer_device(device)->connection->receiver.t_state != NONE)
		drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
			 first_peer_device(device)->connection->receiver.t_state);

	device->al_writ_cnt  =
	device->bm_writ_cnt  =
	device->read_cnt     =
	device->recv_cnt     =
	device->send_cnt     =
	device->writ_cnt     =
	device->p_size       =
	device->rs_start     =
	device->rs_total     =
	device->rs_failed    = 0;
	device->rs_last_events = 0;
	device->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		device->rs_mark_left[i] = 0;
		device->rs_mark_time[i] = 0;
	}
	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);

	drbd_set_my_capacity(device, 0);
	if (device->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(device, 0, 1);
		drbd_bm_cleanup(device);
	}

	drbd_free_ldev(device->ldev);
	device->ldev = NULL;

	clear_bit(AL_SUSPENDED, &device->flags);

	D_ASSERT(device, list_empty(&device->active_ee));
	D_ASSERT(device, list_empty(&device->sync_ee));
	D_ASSERT(device, list_empty(&device->done_ee));
	D_ASSERT(device, list_empty(&device->read_ee));
	D_ASSERT(device, list_empty(&device->net_ee));
	D_ASSERT(device, list_empty(&device->resync_reads));
	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
	D_ASSERT(device, list_empty(&device->resync_work.list));
	D_ASSERT(device, list_empty(&device->unplug_work.list));

	drbd_set_defaults(device);
}


static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
	if (drbd_md_io_page_pool)
		mempool_destroy(drbd_md_io_page_pool);
	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
}
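
/*
 * Editorial note on drbd_destroy_mempools() above: the NULL checks are
 * load-bearing on kernels of this vintage, where kmem_cache_destroy()
 * and mempool_destroy() were historically not NULL-safe, so only
 * pointers that were actually allocated may be passed in.
 */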

static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

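	/*
	 * The page pool is an intrusive stack of struct page: each page's
	 * ->private field points at the next page, with drbd_pp_pool as
	 * the top-of-stack pointer.  A pop is simply
	 *
	 *	page = drbd_pp_pool;
	 *	drbd_pp_pool = (struct page *)page_private(page);
	 *
	 * which is exactly what drbd_destroy_mempools() does to drain it.
	 */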
	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}

static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
			   void *unused)
{
	/* Just so we have it; you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};

static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
	int rr;

	rr = drbd_free_peer_reqs(device, &device->active_ee);
	if (rr)
		drbd_err(device, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->sync_ee);
	if (rr)
		drbd_err(device, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->read_ee);
	if (rr)
		drbd_err(device, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->done_ee);
	if (rr)
		drbd_err(device, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->net_ee);
	if (rr)
		drbd_err(device, "%d EEs in net list found!\n", rr);
}

/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{
	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;

	del_timer_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device, device->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (device->this_bdev)
		bdput(device->this_bdev);

	drbd_free_ldev(device->ldev);
	device->ldev = NULL;

	drbd_release_all_peer_reqs(device);

	lc_destroy(device->act_log);
	lc_destroy(device->resync);

	kfree(device->p_uuid);
	/* device->p_uuid = NULL; */

	if (device->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(device);
	__free_page(device->md_io_page);
	put_disk(device->vdisk);
	blk_cleanup_queue(device->rq_queue);
	kfree(device->rs_plan_s);
	kfree(first_peer_device(device));
	kfree(device);

	for_each_connection(connection, resource)
		kref_put(&connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}

/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;

static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_device *device = req->device;
		struct bio *bio = req->master_bio;
		unsigned long start_time = req->start_time;
		bool expected;

		expected =
			expect(atomic_read(&req->completion_ref) == 0) &&
			expect(req->rq_state & RQ_POSTPONED) &&
			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
			       (req->rq_state & RQ_LOCAL_ABORTED) != 0);

		if (!expected)
			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
				 req, atomic_read(&req->completion_ref),
				 req->rq_state);

		/* We still need to put one kref associated with the
		 * "completion_ref" going zero in the code path that queued it
		 * here.  The request object may still be referenced by a
		 * frozen local req->private_bio, in case we force-detached.
		 */
		kref_put(&req->kref, drbd_req_destroy);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in a multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyway, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(device);
		__drbd_make_request(device, bio, start_time);
	}
}

void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->device);

	queue_work(retry.wq, &retry.worker);
}
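
/*
 * Ordering note: the request is moved onto retry.writes *before* the
 * worker is queued; since do_retry() splices that list under retry.lock
 * as its first action, even an already-running worker instance cannot
 * miss the new entry for long -- queue_work() after the unlock suffices.
 */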

void drbd_destroy_resource(struct kref *kref)
{
	struct drbd_resource *resource =
		container_of(kref, struct drbd_resource, kref);

	idr_destroy(&resource->devices);
	free_cpumask_var(resource->cpu_mask);
	kfree(resource->name);
	kfree(resource);
}

void drbd_free_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection, *tmp;

	for_each_connection_safe(connection, tmp, resource) {
		list_del(&connection->connections);
		kref_put(&connection->kref, drbd_destroy_connection);
	}
	kref_put(&resource->kref, drbd_destroy_resource);
}

static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_device *device;
	struct drbd_resource *resource, *tmp;

	unregister_reboot_notifier(&drbd_notifier);

	/* First remove proc:
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we got stuck in proc removal
	 * with netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&drbd_devices, device, i)
		drbd_delete_device(device);

	/* not _rcu, since there is no other updater anymore; genl is already unregistered */
	for_each_resource_safe(resource, tmp, &drbd_resources) {
		list_del(&resource->resources);
		drbd_free_resource(resource);
	}

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&drbd_devices);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}

/**
 * drbd_congested() - Callback for the flusher thread
 * @congested_data:	User data
 * @bdi_bits:		Bits the BDI flusher thread is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_device *device = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(device)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
		r |= (1 << BDI_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish.
		 */
		if (!get_ldev_if_state(device, D_UP_TO_DATE))
			r |= (1 << BDI_sync_congested);
		else
			put_ldev(device);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(device)) {
		q = bdev_get_queue(device->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(device);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) &&
	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	device->congestion_reason = reason;
	return r;
}
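
/*
 * The congestion_reason characters recorded above decode as follows
 * (summary derived from the branches in drbd_congested()):
 *	'd'  IO is frozen by DRBD itself
 *	'c'  a usermode helper callback is still pending
 *	'b'  the local backing device is congested
 *	'a'  both backing device and network are congested
 *	'n'  the network/peer is congested
 *	'-'  not congested
 */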

static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}

struct completion_work {
	struct drbd_work w;
	struct completion done;
};

static int w_complete(struct drbd_work *w, int cancel)
{
	struct completion_work *completion_work =
		container_of(w, struct completion_work, w);

	complete(&completion_work->done);
	return 0;
}

void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
{
	struct completion_work completion_work;

	completion_work.w.cb = w_complete;
	init_completion(&completion_work.done);
	drbd_queue_work(work_queue, &completion_work.w);
	wait_for_completion(&completion_work.done);
}
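
/*
 * drbd_flush_workqueue() implements "flush" as a barrier work item:
 * because the queue is processed in order, once w_complete() fires every
 * work item queued before it has finished.  The completion_work may live
 * on the stack since wait_for_completion() does not return before
 * w_complete() has run.
 */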

struct drbd_resource *drbd_find_resource(const char *name)
{
	struct drbd_resource *resource;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		if (!strcmp(resource->name, name)) {
			kref_get(&resource->kref);
			goto found;
		}
	}
	resource = NULL;
found:
	rcu_read_unlock();
	return resource;
}
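
/*
 * Lookup pattern used above and in conn_get_by_addrs() below: walk the
 * RCU-protected list and take a kref *inside* the RCU read-side critical
 * section, so the object cannot be freed between finding it and handing
 * it to the caller, who then owns a reference and must kref_put() it.
 */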

struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					  void *peer_addr, int peer_addr_len)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		for_each_connection_rcu(connection, resource) {
			if (connection->my_addr_len == my_addr_len &&
			    connection->peer_addr_len == peer_addr_len &&
			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
				kref_get(&connection->kref);
				goto found;
			}
		}
	}
	connection = NULL;
found:
	rcu_read_unlock();
	return connection;
}

static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}

void conn_free_crypto(struct drbd_connection *connection)
{
	drbd_free_sock(connection);

	crypto_free_hash(connection->csums_tfm);
	crypto_free_hash(connection->verify_tfm);
	crypto_free_hash(connection->cram_hmac_tfm);
	crypto_free_hash(connection->integrity_tfm);
	crypto_free_hash(connection->peer_integrity_tfm);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);

	connection->csums_tfm = NULL;
	connection->verify_tfm = NULL;
	connection->cram_hmac_tfm = NULL;
	connection->integrity_tfm = NULL;
	connection->peer_integrity_tfm = NULL;
	connection->int_dig_in = NULL;
	connection->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
	struct drbd_connection *connection;
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
	/*
	retcode = ERR_NOMEM;
	drbd_msg_put_info("unable to allocate cpumask");
	*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	resource->res_opts = *res_opts;
	if (cpumask_empty(new_cpu_mask))
		drbd_calc_cpu_mask(&new_cpu_mask);
	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
		cpumask_copy(resource->cpu_mask, new_cpu_mask);
		for_each_connection_rcu(connection, resource) {
			connection->receiver.reset_cpu_mask = 1;
			connection->asender.reset_cpu_mask = 1;
			connection->worker.reset_cpu_mask = 1;
		}
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}

struct drbd_resource *drbd_create_resource(const char *name)
{
	struct drbd_resource *resource;

	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
	if (!resource)
		goto fail;
	resource->name = kstrdup(name, GFP_KERNEL);
	if (!resource->name)
		goto fail_free_resource;
	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
		goto fail_free_name;
	kref_init(&resource->kref);
	idr_init(&resource->devices);
	INIT_LIST_HEAD(&resource->connections);
	resource->write_ordering = WO_bdev_flush;
	list_add_tail_rcu(&resource->resources, &drbd_resources);
	mutex_init(&resource->conf_update);
	mutex_init(&resource->adm_mutex);
	spin_lock_init(&resource->req_lock);
	return resource;

fail_free_name:
	kfree(resource->name);
fail_free_resource:
	kfree(resource);
fail:
	return NULL;
}
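
/*
 * Reference counting sketch: kref_init() above hands the initial resource
 * reference to the caller.  Each connection takes a reference of its own
 * in conn_create() (kref_get() right before it is linked into
 * resource->connections), and each device takes one in
 * drbd_create_device(); drbd_free_resource() and drbd_destroy_resource()
 * drop them again on teardown.
 */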

/* caller must be under genl_lock() */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
	if (!connection)
		return NULL;

	if (drbd_alloc_socket(&connection->data))
		goto fail;
	if (drbd_alloc_socket(&connection->meta))
		goto fail;

	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!connection->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&connection->transfer_log);

	INIT_LIST_HEAD(&connection->current_epoch->list);
	connection->epochs = 1;
	spin_lock_init(&connection->epoch_lock);

	connection->send.seen_any_write_yet = false;
	connection->send.current_epoch_nr = 0;
	connection->send.current_epoch_writes = 0;

	resource = drbd_create_resource(name);
	if (!resource)
		goto fail;

	connection->cstate = C_STANDALONE;
	mutex_init(&connection->cstate_mutex);
	init_waitqueue_head(&connection->ping_wait);
	idr_init(&connection->peer_devices);

	drbd_init_workqueue(&connection->sender_work);
	mutex_init(&connection->data.mutex);
	mutex_init(&connection->meta.mutex);

	drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
	connection->receiver.connection = connection;
	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
	connection->worker.connection = connection;
	drbd_thread_init(resource, &connection->asender, drbd_asender, "asender");
	connection->asender.connection = connection;

	kref_init(&connection->kref);

	connection->resource = resource;

	if (set_resource_options(resource, res_opts))
		goto fail_resource;

	kref_get(&resource->kref);
	list_add_tail_rcu(&connection->connections, &resource->connections);
	return connection;

fail_resource:
	list_del(&resource->resources);
	drbd_free_resource(resource);
fail:
	kfree(connection->current_epoch);
	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection);
	return NULL;
}

void drbd_destroy_connection(struct kref *kref)
{
	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
	struct drbd_resource *resource = connection->resource;

	if (atomic_read(&connection->current_epoch->epoch_size) != 0)
		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
	kfree(connection->current_epoch);

	idr_destroy(&connection->peer_devices);

	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);
	kfree(connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}

static int init_submitter(struct drbd_device *device)
{
	/* opencoded create_singlethread_workqueue(),
	 * to be able to use the "drbd%u_submit" name with the minor number */
	device->submit.wq = alloc_workqueue("drbd%u_submit",
			WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
	if (!device->submit.wq)
		return -ENOMEM;

	INIT_WORK(&device->submit.worker, do_submit);
	spin_lock_init(&device->submit.lock);
	INIT_LIST_HEAD(&device->submit.writes);
	return 0;
}

enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
	struct drbd_resource *resource = adm_ctx->resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device, *tmp_peer_device;
	struct gendisk *disk;
	struct request_queue *q;
	int id;
	int vnr = adm_ctx->volume;
	enum drbd_ret_code err = ERR_NOMEM;

	device = minor_to_device(minor);
	if (device)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
	if (!device)
		return ERR_NOMEM;
	kref_init(&device->kref);

	kref_get(&resource->kref);
	device->resource = resource;
	device->minor = minor;
	device->vnr = vnr;

	drbd_init_set_defaults(device);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	device->rq_queue = q;
	q->queuedata = device;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	device->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = device;

	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	device->this_bdev->bd_contains = device->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = device;

	blk_queue_make_request(q, drbd_make_request);
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	/* Setting max_hw_sectors to an odd value of 8 KiB here triggers
	 * a max_bio_size message upon first attach or connect. */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &resource->req_lock;

	device->md_io_page = alloc_page(GFP_KERNEL);
	if (!device->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(device))
		goto out_no_bitmap;
	device->read_requests = RB_ROOT;
	device->write_requests = RB_ROOT;

	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC) {
			err = ERR_MINOR_EXISTS;
			drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
		}
		goto out_no_minor_idr;
	}
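	/*
	 * idr_alloc(idr, ptr, start, end, gfp) returns a negative errno on
	 * failure; -ENOSPC specifically means no id in [start, end) was
	 * free, i.e. the minor (or, below, the volume) is already taken,
	 * which is why only that case maps to ERR_MINOR_EXISTS.
	 */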
	kref_get(&device->kref);

	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC) {
			err = ERR_MINOR_EXISTS;
			drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
		}
		goto out_idr_remove_minor;
	}
	kref_get(&device->kref);

	INIT_LIST_HEAD(&device->peer_devices);
	for_each_connection(connection, resource) {
		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
		if (!peer_device)
			goto out_idr_remove_from_resource;
		peer_device->connection = connection;
		peer_device->device = device;

		list_add(&peer_device->peer_devices, &device->peer_devices);
		kref_get(&device->kref);

		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
		if (id < 0) {
			if (id == -ENOSPC) {
				err = ERR_INVALID_REQUEST;
				drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already");
			}
			goto out_idr_remove_from_resource;
		}
		kref_get(&connection->kref);
	}

	if (init_submitter(device)) {
		err = ERR_NOMEM;
		drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue");
		goto out_idr_remove_vol;
	}

	add_disk(disk);

	/* inherit the connection state */
	device->state.conn = first_connection(resource)->cstate;
	if (device->state.conn == C_WF_REPORT_PARAMS) {
		for_each_peer_device(peer_device, device)
			drbd_connected(peer_device);
	}

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
	for_each_connection(connection, resource) {
		peer_device = idr_find(&connection->peer_devices, vnr);
		if (peer_device) {
			idr_remove(&connection->peer_devices, vnr);
			kref_put(&connection->kref, drbd_destroy_connection);
		}
	}
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		list_del(&peer_device->peer_devices);
		kfree(peer_device);
	}
	idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
	idr_remove(&drbd_devices, minor);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(device);
out_no_bitmap:
	__free_page(device->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kref_put(&resource->kref, drbd_destroy_resource);
	kfree(device);
	return err;
}

void drbd_delete_device(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;
	int refs = 3;

	for_each_connection(connection, resource) {
		idr_remove(&connection->peer_devices, device->vnr);
		refs++;
	}
	idr_remove(&resource->devices, device->vnr);
	idr_remove(&drbd_devices, device_to_minor(device));
	del_gendisk(device->vdisk);
	synchronize_rcu();
	kref_sub(&device->kref, refs, drbd_destroy_device);
}
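
/*
 * The base of refs = 3 above mirrors the references taken in
 * drbd_create_device(): the initial one from kref_init() plus one each
 * for the drbd_devices and resource->devices idr entries; every
 * connection's peer_devices idr holds one more, hence refs++ per
 * connection before the single kref_sub().
 */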

int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&drbd_devices);

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_resources);

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	err = drbd_create_mempools();
	if (err)
		goto fail;

	err = -ENOMEM;
	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
	       DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}

void drbd_free_ldev(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev->disk_conf);
	kfree(ldev);
}

void drbd_free_sock(struct drbd_connection *connection)
{
	if (connection->data.socket) {
		mutex_lock(&connection->data.mutex);
		kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
		sock_release(connection->data.socket);
		connection->data.socket = NULL;
		mutex_unlock(&connection->data.mutex);
	}
	if (connection->meta.socket) {
		mutex_lock(&connection->meta.mutex);
		kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
		sock_release(connection->meta.socket);
		connection->meta.socket = NULL;
		mutex_unlock(&connection->meta.mutex);
	}
}
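
/*
 * Presumably the kernel_sock_shutdown(SHUT_RDWR) before sock_release()
 * above serves to wake any thread still blocked in a send or receive on
 * that socket, so it notices the teardown before the socket goes away.
 */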

/* meta data management */

void conn_md_sync(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_md_sync(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
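
/*
 * conn_md_sync() shows the usual trick for calling a function that may
 * block from inside an RCU-protected iteration: pin the current entry
 * with kref_get(), drop the RCU read lock for the blocking call, then
 * re-acquire it before continuing the walk.
 */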

/* aligned 4kByte */
struct meta_data_on_disk {
	u64 la_size_sect;      /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;

	u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
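
/*
 * Size check on the struct above: it carries 7 u64 fields (la_size_sect,
 * uuid[4], device_uuid, reserved_u64_1) and 10 u32 fields, i.e.
 * 7*8 + 10*4 = 96 bytes of payload; reserved_u8[] pads the block to
 * exactly 4096 bytes, which drbd_md_sync() asserts with BUILD_BUG_ON().
 */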

void drbd_md_write(struct drbd_device *device, void *b)
{
	struct meta_data_on_disk *buffer = b;
	sector_t sector;
	int i;

	memset(buffer, 0, sizeof(*buffer));

	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(device->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);

	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);

	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	sector = device->ldev->md.md_offset;

	if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
		/* this was a try anyway ... */
		drbd_err(device, "meta data update failed!\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
	}
}
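
/*
 * All multi-byte fields above are converted with cpu_to_be32()/
 * cpu_to_be64() before hitting the disk, so the on-disk meta data layout
 * is big-endian and thus independent of the host architecture.
 */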

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @device:	DRBD device.
 */
void drbd_md_sync(struct drbd_device *device)
{
	struct meta_data_on_disk *buffer;

	/* Don't accidentally change the DRBD meta data layout. */
	BUILD_BUG_ON(UI_SIZE != 4);
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);

	del_timer(&device->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
		return;

	/* We use D_FAILED here, not D_ATTACHING, because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(device, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(device);
	if (!buffer)
		goto out;

	drbd_md_write(device, buffer);

	/* Update device->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);

	drbd_md_put_buffer(device);
out:
	put_ldev(device);
}

static int check_activity_log_stripe_size(struct drbd_device *device,
		struct meta_data_on_disk *on_disk,
		struct drbd_md *in_core)
{
	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
	u64 al_size_4k;

	/* both not set: default to old fixed size activity log */
	if (al_stripes == 0 && al_stripe_size_4k == 0) {
		al_stripes = 1;
		al_stripe_size_4k = MD_32kB_SECT/8;
	}

	/* some paranoia plausibility checks */

	/* we need both values to be set */
	if (al_stripes == 0 || al_stripe_size_4k == 0)
		goto err;

	al_size_4k = (u64)al_stripes * al_stripe_size_4k;

	/* Upper limit of activity log area, to avoid potential overflow
	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
	 * than 72 * 4k blocks total only increases the amount of history,
	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
	if (al_size_4k > (16 * 1024 * 1024/4))
		goto err;

	/* Lower limit: we need at least 8 transaction slots (32kB)
	 * to not break existing setups */
	if (al_size_4k < MD_32kB_SECT/8)
		goto err;

	in_core->al_stripe_size_4k = al_stripe_size_4k;
	in_core->al_stripes = al_stripes;
	in_core->al_size_4k = al_size_4k;

	return 0;
err:
	drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
			al_stripes, al_stripe_size_4k);
	return -EINVAL;
}
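
/* Worked example of the bounds above (a sketch, not exhaustive): the
 * legacy default is al_stripes = 1, al_stripe_size_4k = MD_32kB_SECT/8,
 * i.e. one stripe of 8 4kB blocks = 32kB = 8 transaction slots, which is
 * also the lower limit.  The upper limit of 16*1024*1024/4 4kB blocks
 * corresponds to 16 GB of activity log area. */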

static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
	struct drbd_md *in_core = &bdev->md;
	s32 on_disk_al_sect;
	s32 on_disk_bm_sect;

	/* The on-disk size of the activity log, calculated from offsets, and
	 * the size of the activity log calculated from the stripe settings,
	 * should match.
	 * Though we could relax this a bit: it is ok, if the striped activity log
	 * fits in the available on-disk activity log size.
	 * Right now, that would break how resize is implemented.
	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
	 * of possible unused padding space in the on disk layout. */
	if (in_core->al_offset < 0) {
		if (in_core->bm_offset > in_core->al_offset)
			goto err;
		on_disk_al_sect = -in_core->al_offset;
		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
	} else {
		if (in_core->al_offset != MD_4kB_SECT)
			goto err;
		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
			goto err;

		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
	}

	/* old fixed size meta data is exactly that: fixed. */
	if (in_core->meta_dev_idx >= 0) {
		if (in_core->md_size_sect != MD_128MB_SECT
		||  in_core->al_offset != MD_4kB_SECT
		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
		||  in_core->al_stripes != 1
		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
			goto err;
	}

	if (capacity < in_core->md_size_sect)
		goto err;
	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
		goto err;

	/* should be aligned, and at least 32k */
	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
		goto err;

	/* should fit (for now: exactly) into the available on-disk space;
	 * overflow prevention is in check_activity_log_stripe_size() above. */
	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
		goto err;

	/* again, should be aligned */
	if (in_core->bm_offset & 7)
		goto err;

	/* FIXME check for device grow with flex external meta data? */

	/* can the available bitmap space cover the last agreed device size? */
	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
		goto err;

	return 0;

err:
	drbd_err(device, "meta data offsets don't make sense: idx=%d "
			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
			in_core->meta_dev_idx,
			in_core->al_stripes, in_core->al_stripe_size_4k,
			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
			(unsigned long long)in_core->la_size_sect,
			(unsigned long long)capacity);

	return -EINVAL;
}
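
/* The two layouts checked above, as a sketch:
 *
 * internal meta data (meta_dev_idx < 0): offsets are negative, relative
 * to the superblock at the end of the device:
 *	... bitmap ... | activity log | superblock (4kB)
 *	bm_offset <= al_offset < 0
 *
 * external/indexed meta data (meta_dev_idx >= 0): fixed 128 MB slot,
 * offsets are positive, relative to the start of the slot:
 *	superblock (4kB) | activity log (32kB) | bitmap ...
 *	al_offset == MD_4kB_SECT, bm_offset == MD_4kB_SECT + MD_32kB_SECT
 */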

/**
 * drbd_md_read() - Reads in the meta data super block
 * @device:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return NO_ERROR on success, and an enum drbd_ret_code in case
 * something goes wrong.
 *
 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
 * even before @bdev is assigned to @device->ldev.
 */
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (device->state.disk != D_DISKLESS)
		return ERR_DISK_CONFIGURED;

	buffer = drbd_md_get_buffer(device);
	if (!buffer)
		return ERR_NOMEM;

	/* First, figure out where our meta data superblock is located,
	 * and read it. */
	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
	bdev->md.md_offset = drbd_md_ss(bdev);

	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		drbd_err(device, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
		/* btw: that's Activity Log clean, not "all" clean. */
		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}

	rv = ERR_MD_INVALID;
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		goto err;
	}

	/* convert to in_core endian */
	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);

	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
		goto err;
	if (check_offsets_and_sizes(device, bdev))
		goto err;

	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		goto err;
	}

	rv = NO_ERROR;

	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
		device->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&device->resource->req_lock);

 err:
	drbd_md_put_buffer(device);

	return rv;
}
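
/* A sketch of the intended call site (a hypothetical simplification of
 * the attach path, drbd_adm_attach() in drbd_nl.c; "nbc" stands for the
 * not-yet-attached backing device):
 *
 *	retcode = drbd_md_read(device, nbc);
 *	if (retcode != NO_ERROR)
 *		goto fail;
 */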

/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @device:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures drbd_md_sync() is called within five seconds.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
		mod_timer(&device->md_sync_timer, jiffies + HZ);
		device->last_md_mark_dirty.line = line;
		device->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_device *device)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags))
		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif
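
/* Typical usage pattern (a sketch; cf. drbd_bmio_set_n_write() below):
 * mutate the in-core meta data, mark it dirty, and either rely on the
 * md_sync_timer or force the write out immediately:
 *
 *	drbd_md_set_flag(device, MDF_FULL_SYNC);	// marks md dirty
 *	drbd_md_sync(device);				// write out now
 */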

void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}

void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (device->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(device, val);
	}

	device->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(device);
}

void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (device->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
	}
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @device:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid;

	get_random_bytes(&val, sizeof(u64));

	spin_lock_irq(&device->ldev->md.uuid_lock);
	bm_uuid = device->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
	__drbd_uuid_set(device, UI_CURRENT, val);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	drbd_print_uuids(device, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(device);
}
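
/* Note on the rotation above: the old current UUID is preserved in the
 * UI_BITMAP slot, so changes tracked in the bitmap since then can drive
 * an incremental resync.  __drbd_uuid_set() additionally encodes the
 * local role in bit 0 of the new current UUID: set while R_PRIMARY,
 * cleared otherwise. */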

void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
	unsigned long flags;
	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (val == 0) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
		device->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);

	drbd_md_mark_dirty(device);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
{
	int rv = -EIO;

	drbd_md_set_flag(device, MDF_FULL_SYNC);
	drbd_md_sync(device);
	drbd_bm_set_all(device);

	rv = drbd_bm_write(device);

	if (!rv) {
		drbd_md_clear_flag(device, MDF_FULL_SYNC);
		drbd_md_sync(device);
	}

	return rv;
}
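
/* A minimal usage sketch: this io_fn (like drbd_bmio_clear_n_write()
 * below) is meant to be handed to drbd_bitmap_io() or
 * drbd_queue_bitmap_io() further down; the reason string here is
 * illustrative only:
 *
 *	rv = drbd_bitmap_io(device, &drbd_bmio_set_n_write,
 *			"set_n_write from example", BM_LOCKED_MASK);
 */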

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
{
	drbd_resume_al(device);
	drbd_bm_clear_all(device);
	return drbd_bm_write(device);
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, bm_io_work.w);
	struct bm_io_work *work = &device->bm_io_work;
	int rv = -EIO;

	D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);

	if (get_ldev(device)) {
		drbd_bm_lock(device, work->why, work->flags);
		rv = work->io_fn(device);
		drbd_bm_unlock(device);
		put_ldev(device);
	}

	clear_bit_unlock(BITMAP_IO, &device->flags);
	wake_up(&device->misc_wait);

	if (work->done)
		work->done(device, rv);

	clear_bit(BITMAP_IO_QUEUED, &device->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 *
 * Its worker function encloses the call of io_fn() by get_ldev() and
 * put_ldev().
 */
void drbd_queue_bitmap_io(struct drbd_device *device,
			  int (*io_fn)(struct drbd_device *),
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
	if (device->bm_io_work.why)
		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, device->bm_io_work.why);

	device->bm_io_work.io_fn = io_fn;
	device->bm_io_work.done = done;
	device->bm_io_work.why = why;
	device->bm_io_work.flags = flags;

	spin_lock_irq(&device->resource->req_lock);
	set_bit(BITMAP_IO, &device->flags);
	if (atomic_read(&device->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
					&device->bm_io_work.w);
	}
	spin_unlock_irq(&device->resource->req_lock);
}

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(device);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(device);

	return rv;
}

void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_device *device = (struct drbd_device *) data;
	drbd_device_post_work(device, MD_SYNC);
}

const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]		= "Data",
		[P_DATA_REPLY]		= "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]		= "Barrier",
		[P_BITMAP]		= "ReportBitMap",
		[P_BECOME_SYNC_TARGET]	= "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]	= "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]	= "RSDataRequest",
		[P_SYNC_PARAM]		= "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]		= "ReportProtocol",
		[P_UUIDS]		= "ReportUUIDs",
		[P_SIZES]		= "ReportSizes",
		[P_STATE]		= "ReportState",
		[P_SYNC_UUID]		= "ReportSyncUUID",
		[P_AUTH_CHALLENGE]	= "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]		= "PingAck",
		[P_RECV_ACK]		= "RecvAck",
		[P_WRITE_ACK]		= "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_SUPERSEDED]		= "Superseded",
		[P_NEG_ACK]		= "NegAck",
		[P_NEG_DREPLY]		= "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]		= "BarrierAck",
		[P_STATE_CHG_REQ]	= "StateChgRequest",
		[P_STATE_CHG_REPLY]	= "StateChgReply",
		[P_OV_REQUEST]		= "OVRequest",
		[P_OV_REPLY]		= "OVReply",
		[P_OV_RESULT]		= "OVResult",
		[P_CSUM_RS_REQUEST]	= "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]	= "CBitmap",
		[P_DELAY_PROBE]		= "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RS_CANCEL]		= "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		[P_RETRY_WRITE]		= "retry_write",
		[P_PROTOCOL_UPDATE]	= "protocol_update",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
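
/* Usage sketch (a hypothetical log statement; pi is assumed to be a
 * struct packet_info as used by the receiver code):
 *
 *	drbd_err(device, "unexpected packet type %s\n", cmdname(pi->cmd));
 */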

/**
 * drbd_wait_misc - wait for a request to make progress
 * @device:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up device->misc_wait on progress. */
	i->waiting = true;
	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&device->resource->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&device->misc_wait, &wait);
	spin_lock_irq(&device->resource->req_lock);
	if (!timeout || device->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
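
/* Worked example for the timeout computation above: nc->timeout is in
 * units of 0.1 seconds, so with timeout = 60 (6 s) and ko_count = 7 the
 * wait is 60 * HZ / 10 * 7 = 42 s worth of jiffies; with ko_count == 0
 * it degenerates to MAX_SCHEDULE_TIMEOUT, i.e. no time limit. */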

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << device_to_minor(device)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			drbd_warn(device, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif
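
/* Sketch of how this is consumed (assuming the drbd_insert_fault()
 * wrapper from drbd_int.h, which compiles to 0 unless
 * CONFIG_DRBD_FAULT_INJECTION is set): with module parameters
 * fault_rate=1 and fault_devs=0, roughly 1% of the IOs of the enabled
 * fault type(s) on all minors trigger a simulated failure, e.g.:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_MD_WR))
 *		err = -EIO;	// hypothetical caller's failure path
 */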

const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	   git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef MODULE
		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
		buildtag[0] = 'b';
#endif
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);