/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbd_worker(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
              "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
                 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
int proc_details;       /* Detail level in proc drbd*/

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;      /* peer requests */
struct kmem_cache *drbd_bm_ext_cache;  /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;  /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
         member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
        .owner   = THIS_MODULE,
        .open    = drbd_open,
        .release = drbd_release,
};

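/* Allocate a bio for DRBD meta-data I/O.  Bios come from the dedicated
 * drbd_md_io_bio_set once it has been created, so meta-data I/O does not
 * depend on the general purpose bio allocator; until the bioset is set up
 * we fall back to a plain bio_alloc(). */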
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
        struct bio *bio;

        if (!drbd_md_io_bio_set)
                return bio_alloc(gfp_mask, 1);

        bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
        if (!bio)
                return NULL;
        return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
        int io_allowed;

        atomic_inc(&device->local_cnt);
        io_allowed = (device->state.disk >= mins);
        if (!io_allowed) {
                if (atomic_dec_and_test(&device->local_cnt))
                        wake_up(&device->misc_wait);
        }
        return io_allowed;
}

#endif

/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
                unsigned int set_size)
{
        struct drbd_request *r;
        struct drbd_request *req = NULL;
        int expect_epoch = 0;
        int expect_size = 0;

        spin_lock_irq(&connection->resource->req_lock);

        /* find oldest not yet barrier-acked write request,
         * count writes in its epoch. */
        list_for_each_entry(r, &connection->transfer_log, tl_requests) {
                const unsigned s = r->rq_state;
                if (!req) {
                        if (!(s & RQ_WRITE))
                                continue;
                        if (!(s & RQ_NET_MASK))
                                continue;
                        if (s & RQ_NET_DONE)
                                continue;
                        req = r;
                        expect_epoch = req->epoch;
                        expect_size++;
                } else {
                        if (r->epoch != expect_epoch)
                                break;
                        if (!(s & RQ_WRITE))
                                continue;
                        /* if (s & RQ_DONE): not expected */
                        /* if (!(s & RQ_NET_MASK)): not expected */
                        expect_size++;
                }
        }

        /* first some paranoia code */
        if (req == NULL) {
                drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
                         barrier_nr);
                goto bail;
        }
        if (expect_epoch != barrier_nr) {
                drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
                         barrier_nr, expect_epoch);
                goto bail;
        }

        if (expect_size != set_size) {
                drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
                         barrier_nr, set_size, expect_size);
                goto bail;
        }

        /* Clean up list of requests processed during current epoch. */
        /* this extra list walk restart is paranoia,
         * to catch requests being barrier-acked "unexpectedly".
         * It usually should find the same req again, or some READ preceding it. */
        list_for_each_entry(req, &connection->transfer_log, tl_requests)
                if (req->epoch == expect_epoch)
                        break;
        list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
                if (req->epoch != expect_epoch)
                        break;
                _req_mod(req, BARRIER_ACKED);
        }
        spin_unlock_irq(&connection->resource->req_lock);

        return;

bail:
        spin_unlock_irq(&connection->resource->req_lock);
        conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
        struct drbd_request *req, *r;

        list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
                _req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
        spin_lock_irq(&connection->resource->req_lock);
        _tl_restart(connection, what);
        spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from
 * the receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
        tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}

/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
        struct drbd_connection *connection = first_peer_device(device)->connection;
        struct drbd_request *req, *r;

        spin_lock_irq(&connection->resource->req_lock);
        list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
                if (!(req->rq_state & RQ_LOCAL_PENDING))
                        continue;
                if (req->w.device != device)
                        continue;
                _req_mod(req, ABORT_DISK_IO);
        }
        spin_unlock_irq(&connection->resource->req_lock);
}

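/* Common entry point of the DRBD kernel threads (receiver, worker, asender).
 * Runs thi->function() and, if the thread state was meanwhile set to
 * RESTARTING, loops and runs it again; otherwise it marks the thread as
 * gone (NONE), completes thi->stop, and drops the connection and module
 * references that were taken in drbd_thread_start(). */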
static int drbd_thread_setup(void *arg)
{
        struct drbd_thread *thi = (struct drbd_thread *) arg;
        struct drbd_connection *connection = thi->connection;
        unsigned long flags;
        int retval;

        snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
                 thi->name[0],
                 thi->connection->resource->name);

restart:
        retval = thi->function(thi);

        spin_lock_irqsave(&thi->t_lock, flags);

        /* if the receiver has been "EXITING", the last thing it did
         * was set the conn state to "StandAlone",
         * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
         * and receiver thread will be "started".
         * drbd_thread_start needs to set "RESTARTING" in that case.
         * t_state check and assignment needs to be within the same spinlock,
         * so either thread_start sees EXITING, and can remap to RESTARTING,
         * or thread_start sees NONE, and can proceed as normal.
         */

        if (thi->t_state == RESTARTING) {
                drbd_info(connection, "Restarting %s thread\n", thi->name);
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                goto restart;
        }

        thi->task = NULL;
        thi->t_state = NONE;
        smp_mb();
        complete_all(&thi->stop);
        spin_unlock_irqrestore(&thi->t_lock, flags);

        drbd_info(connection, "Terminating %s\n", current->comm);

        /* Release mod reference taken when thread was started */

        kref_put(&connection->kref, drbd_destroy_connection);
        module_put(THIS_MODULE);
        return retval;
}

static void drbd_thread_init(struct drbd_connection *connection, struct drbd_thread *thi,
                             int (*func) (struct drbd_thread *), char *name)
{
        spin_lock_init(&thi->t_lock);
        thi->task = NULL;
        thi->t_state = NONE;
        thi->function = func;
        thi->connection = connection;
        strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

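/* Start a DRBD thread, or arrange for a restart.  A thread in state NONE
 * gets a freshly created kthread; a thread still in EXITING is only moved
 * to RESTARTING, and drbd_thread_setup() will loop instead of terminating.
 * The module and connection references taken here are released when the
 * thread exits. */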
int drbd_thread_start(struct drbd_thread *thi)
{
        struct drbd_connection *connection = thi->connection;
        struct task_struct *nt;
        unsigned long flags;

        /* is used from state engine doing drbd_thread_stop_nowait,
         * while holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        switch (thi->t_state) {
        case NONE:
                drbd_info(connection, "Starting %s thread (from %s [%d])\n",
                          thi->name, current->comm, current->pid);

                /* Get ref on module for thread - this is released when thread exits */
                if (!try_module_get(THIS_MODULE)) {
                        drbd_err(connection, "Failed to get module reference in drbd_thread_start\n");
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return false;
                }

                kref_get(&thi->connection->kref);

                init_completion(&thi->stop);
                thi->reset_cpu_mask = 1;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

                nt = kthread_create(drbd_thread_setup, (void *) thi,
                                    "drbd_%c_%s", thi->name[0], thi->connection->resource->name);

                if (IS_ERR(nt)) {
                        drbd_err(connection, "Couldn't start thread\n");

                        kref_put(&connection->kref, drbd_destroy_connection);
                        module_put(THIS_MODULE);
                        return false;
                }
                spin_lock_irqsave(&thi->t_lock, flags);
                thi->task = nt;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                wake_up_process(nt);
                break;
        case EXITING:
                thi->t_state = RESTARTING;
                drbd_info(connection, "Restarting %s thread (from %s [%d])\n",
                          thi->name, current->comm, current->pid);
                /* fall through */
        case RUNNING:
        case RESTARTING:
        default:
                spin_unlock_irqrestore(&thi->t_lock, flags);
                break;
        }

        return true;
}


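/* Ask a thread to stop (ns == EXITING) or to restart (ns == RESTARTING).
 * A thread that has already reached NONE is simply started again when a
 * restart was requested.  With @wait set, block until the thread has
 * completed thi->stop. */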
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
        unsigned long flags;

        enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

        /* may be called from state engine, holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        if (thi->t_state == NONE) {
                spin_unlock_irqrestore(&thi->t_lock, flags);
                if (restart)
                        drbd_thread_start(thi);
                return;
        }

        if (thi->t_state != ns) {
                if (thi->task == NULL) {
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return;
                }

                thi->t_state = ns;
                smp_mb();
                init_completion(&thi->stop);
                if (thi->task != current)
                        force_sig(DRBD_SIGKILL, thi->task);
        }

        spin_unlock_irqrestore(&thi->t_lock, flags);

        if (wait)
                wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_connection *connection, struct task_struct *task)
{
        struct drbd_thread *thi =
                task == connection->receiver.task ? &connection->receiver :
                task == connection->asender.task  ? &connection->asender :
                task == connection->worker.task   ? &connection->worker : NULL;

        return thi;
}

char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task)
{
        struct drbd_thread *thi = drbd_task_to_thread(connection, task);
        return thi ? thi->name : task->comm;
}

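/* Return the minor number of the lowest-numbered volume of this connection,
 * or -1 if the connection has no peer devices at the moment. */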
int conn_lowest_minor(struct drbd_connection *connection)
{
        struct drbd_peer_device *peer_device;
        int vnr = 0, minor = -1;

        rcu_read_lock();
        peer_device = idr_get_next(&connection->peer_devices, &vnr);
        if (peer_device)
                minor = device_to_minor(peer_device->device);
        rcu_read_unlock();

        return minor;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @connection:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_connection *connection)
{
        int ord, cpu;

        /* user override. */
        if (cpumask_weight(connection->cpu_mask))
                return;

        ord = conn_lowest_minor(connection) % cpumask_weight(cpu_online_mask);
        for_each_online_cpu(cpu) {
                if (ord-- == 0) {
                        cpumask_set_cpu(cpu, connection->cpu_mask);
                        return;
                }
        }
        /* should not be reached */
        cpumask_setall(connection->cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
        struct task_struct *p = current;

        if (!thi->reset_cpu_mask)
                return;
        thi->reset_cpu_mask = 0;
        set_cpus_allowed_ptr(p, thi->connection->cpu_mask);
}
#endif

/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
        if (connection->agreed_pro_version >= 100) {
                BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
                return sizeof(struct p_header100);
        } else {
                BUILD_BUG_ON(sizeof(struct p_header80) !=
                             sizeof(struct p_header95));
                BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
                return sizeof(struct p_header80);
        }
}

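/* The wire format knows three header layouts: p_header80 (16-bit length),
 * p_header95 (32-bit length) and p_header100 (32-bit length plus volume
 * number).  prepare_header() picks p_header100 for agreed protocol 100 and
 * later, p_header95 for protocol 95+ when the payload exceeds
 * DRBD_MAX_SIZE_H80_PACKET, and p_header80 otherwise. */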
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be32(DRBD_MAGIC);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be16(size);
        return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be32(size);
        return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
                                      int size, int vnr)
{
        h->magic = cpu_to_be32(DRBD_MAGIC_100);
        h->volume = cpu_to_be16(vnr);
        h->command = cpu_to_be16(cmd);
        h->length = cpu_to_be32(size);
        h->pad = 0;
        return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
                                   void *buffer, enum drbd_packet cmd, int size)
{
        if (connection->agreed_pro_version >= 100)
                return prepare_header100(buffer, cmd, size, vnr);
        else if (connection->agreed_pro_version >= 95 &&
                 size > DRBD_MAX_SIZE_H80_PACKET)
                return prepare_header95(buffer, cmd, size);
        else
                return prepare_header80(buffer, cmd, size);
}

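/* The *_prepare_command() helpers return a pointer just past the packet
 * header inside the per-socket send buffer; conn_prepare_command() also
 * takes the socket mutex.  The matching *_send_command() fills in the
 * header, transmits header and payload, and releases the mutex again. */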
static void *__conn_prepare_command(struct drbd_connection *connection,
                                    struct drbd_socket *sock)
{
        if (!sock->socket)
                return NULL;
        return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
        void *p;

        mutex_lock(&sock->mutex);
        p = __conn_prepare_command(connection, sock);
        if (!p)
                mutex_unlock(&sock->mutex);

        return p;
}

void *drbd_prepare_command(struct drbd_device *device, struct drbd_socket *sock)
{
        return conn_prepare_command(first_peer_device(device)->connection, sock);
}

static int __send_command(struct drbd_connection *connection, int vnr,
                          struct drbd_socket *sock, enum drbd_packet cmd,
                          unsigned int header_size, void *data,
                          unsigned int size)
{
        int msg_flags;
        int err;

        /*
         * Called with @data == NULL and the size of the data blocks in @size
         * for commands that send data blocks. For those commands, omit the
         * MSG_MORE flag: this will increase the likelihood that data blocks
         * which are page aligned on the sender will end up page aligned on the
         * receiver.
         */
        msg_flags = data ? MSG_MORE : 0;

        header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
                                      header_size + size);
        err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
                            msg_flags);
        if (data && !err)
                err = drbd_send_all(connection, sock->socket, data, size, 0);
        return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
                               enum drbd_packet cmd, unsigned int header_size,
                               void *data, unsigned int size)
{
        return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
                      enum drbd_packet cmd, unsigned int header_size,
                      void *data, unsigned int size)
{
        int err;

        err = __conn_send_command(connection, sock, cmd, header_size, data, size);
        mutex_unlock(&sock->mutex);
        return err;
}

int drbd_send_command(struct drbd_device *device, struct drbd_socket *sock,
                      enum drbd_packet cmd, unsigned int header_size,
                      void *data, unsigned int size)
{
        int err;

        err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, header_size,
                             data, size);
        mutex_unlock(&sock->mutex);
        return err;
}

int drbd_send_ping(struct drbd_connection *connection)
{
        struct drbd_socket *sock;

        sock = &connection->meta;
        if (!conn_prepare_command(connection, sock))
                return -EIO;
        return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
        struct drbd_socket *sock;

        sock = &connection->meta;
        if (!conn_prepare_command(connection, sock))
                return -EIO;
        return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}

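/* Send the resync parameters to the peer: resync rate, the dynamic resync
 * controller settings, and the verify/csums algorithm names.  Packet size
 * and command depend on the agreed protocol version (apv), see the size
 * and cmd selection below. */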
int drbd_send_sync_param(struct drbd_device *device)
{
        struct drbd_socket *sock;
        struct p_rs_param_95 *p;
        int size;
        const int apv = first_peer_device(device)->connection->agreed_pro_version;
        enum drbd_packet cmd;
        struct net_conf *nc;
        struct disk_conf *dc;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;

        rcu_read_lock();
        nc = rcu_dereference(first_peer_device(device)->connection->net_conf);

        size = apv <= 87 ? sizeof(struct p_rs_param)
                : apv == 88 ? sizeof(struct p_rs_param)
                        + strlen(nc->verify_alg) + 1
                : apv <= 94 ? sizeof(struct p_rs_param_89)
                : /* apv >= 95 */ sizeof(struct p_rs_param_95);

        cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

        /* initialize verify_alg and csums_alg */
        memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

        if (get_ldev(device)) {
                dc = rcu_dereference(device->ldev->disk_conf);
                p->resync_rate = cpu_to_be32(dc->resync_rate);
                p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
                p->c_delay_target = cpu_to_be32(dc->c_delay_target);
                p->c_fill_target = cpu_to_be32(dc->c_fill_target);
                p->c_max_rate = cpu_to_be32(dc->c_max_rate);
                put_ldev(device);
        } else {
                p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
                p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
                p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
                p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
                p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
        }

        if (apv >= 88)
                strcpy(p->verify_alg, nc->verify_alg);
        if (apv >= 89)
                strcpy(p->csums_alg, nc->csums_alg);
        rcu_read_unlock();

        return drbd_send_command(device, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
        struct drbd_socket *sock;
        struct p_protocol *p;
        struct net_conf *nc;
        int size, cf;

        sock = &connection->data;
        p = __conn_prepare_command(connection, sock);
        if (!p)
                return -EIO;

        rcu_read_lock();
        nc = rcu_dereference(connection->net_conf);

        if (nc->tentative && connection->agreed_pro_version < 92) {
                rcu_read_unlock();
                mutex_unlock(&sock->mutex);
                drbd_err(connection, "--dry-run is not supported by peer");
                return -EOPNOTSUPP;
        }

        size = sizeof(*p);
        if (connection->agreed_pro_version >= 87)
                size += strlen(nc->integrity_alg) + 1;

        p->protocol      = cpu_to_be32(nc->wire_protocol);
        p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
        p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
        p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
        p->two_primaries = cpu_to_be32(nc->two_primaries);
        cf = 0;
        if (nc->discard_my_data)
                cf |= CF_DISCARD_MY_DATA;
        if (nc->tentative)
                cf |= CF_DRY_RUN;
        p->conn_flags    = cpu_to_be32(cf);

        if (connection->agreed_pro_version >= 87)
                strcpy(p->integrity_alg, nc->integrity_alg);
        rcu_read_unlock();

        return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
        int err;

        mutex_lock(&connection->data.mutex);
        err = __drbd_send_protocol(connection, P_PROTOCOL);
        mutex_unlock(&connection->data.mutex);

        return err;
}

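/* Send our UUID set (current, bitmap, history) together with the number of
 * bits set in the bitmap and a flag word: flag 1 is discard-my-data, 2 a
 * crashed primary, 4 an inconsistent new disk state; the skip-initial-sync
 * variant below additionally passes flag 8. */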
static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
{
        struct drbd_socket *sock;
        struct p_uuids *p;
        int i;

        if (!get_ldev_if_state(device, D_NEGOTIATING))
                return 0;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p) {
                put_ldev(device);
                return -EIO;
        }
        spin_lock_irq(&device->ldev->md.uuid_lock);
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
        spin_unlock_irq(&device->ldev->md.uuid_lock);

        device->comm_bm_set = drbd_bm_total_weight(device);
        p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
        rcu_read_lock();
        uuid_flags |= rcu_dereference(first_peer_device(device)->connection->net_conf)->discard_my_data ? 1 : 0;
        rcu_read_unlock();
        uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
        uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
        p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

        put_ldev(device);
        return drbd_send_command(device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_device *device)
{
        return _drbd_send_uuids(device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_device *device)
{
        return _drbd_send_uuids(device, 8);
}

void drbd_print_uuids(struct drbd_device *device, const char *text)
{
        if (get_ldev_if_state(device, D_NEGOTIATING)) {
                u64 *uuid = device->ldev->md.uuid;
                drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
                          text,
                          (unsigned long long)uuid[UI_CURRENT],
                          (unsigned long long)uuid[UI_BITMAP],
                          (unsigned long long)uuid[UI_HISTORY_START],
                          (unsigned long long)uuid[UI_HISTORY_END]);
                put_ldev(device);
        } else {
                drbd_info(device, "%s effective data uuid: %016llX\n",
                          text,
                          (unsigned long long)device->ed_uuid);
        }
}

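/* Generate a new bitmap UUID (bump the existing one, or use random bytes if
 * it was just created), write it to our meta-data, and announce it to the
 * peer in a P_SYNC_UUID packet.  The disk must be D_UP_TO_DATE here. */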
void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
{
        struct drbd_socket *sock;
        struct p_rs_uuid *p;
        u64 uuid;

        D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

        uuid = device->ldev->md.uuid[UI_BITMAP];
        if (uuid && uuid != UUID_JUST_CREATED)
                uuid = uuid + UUID_NEW_BM_OFFSET;
        else
                get_random_bytes(&uuid, sizeof(u64));
        drbd_uuid_set(device, UI_BITMAP, uuid);
        drbd_print_uuids(device, "updated sync UUID");
        drbd_md_sync(device);

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (p) {
                p->uuid = cpu_to_be64(uuid);
                drbd_send_command(device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
        }
}

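/* Send our sizes to the peer: backing device capacity, user-configured size,
 * current capacity of the DRBD device, queue order type and the largest bio
 * size we accept, clamped further for peers with older protocol versions. */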
int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flags flags)
{
        struct drbd_socket *sock;
        struct p_sizes *p;
        sector_t d_size, u_size;
        int q_order_type;
        unsigned int max_bio_size;

        if (get_ldev_if_state(device, D_NEGOTIATING)) {
                D_ASSERT(device, device->ldev->backing_bdev);
                d_size = drbd_get_max_capacity(device->ldev);
                rcu_read_lock();
                u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
                rcu_read_unlock();
                q_order_type = drbd_queue_order_type(device);
                max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
                max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
                put_ldev(device);
        } else {
                d_size = 0;
                u_size = 0;
                q_order_type = QUEUE_ORDERED_NONE;
                max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
        }

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;

        if (first_peer_device(device)->connection->agreed_pro_version <= 94)
                max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
        else if (first_peer_device(device)->connection->agreed_pro_version < 100)
                max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

        p->d_size = cpu_to_be64(d_size);
        p->u_size = cpu_to_be64(u_size);
        p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
        p->max_bio_size = cpu_to_be32(max_bio_size);
        p->queue_order_type = cpu_to_be16(q_order_type);
        p->dds_flags = cpu_to_be16(flags);
        return drbd_send_command(device, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @device:	DRBD device.
 */
int drbd_send_current_state(struct drbd_device *device)
{
        struct drbd_socket *sock;
        struct p_state *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->state = cpu_to_be32(device->state.i); /* Within the send mutex */
        return drbd_send_command(device, sock, P_STATE, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @device:	DRBD device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_device *device, union drbd_state state)
{
        struct drbd_socket *sock;
        struct p_state *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->state = cpu_to_be32(state.i); /* Within the send mutex */
        return drbd_send_command(device, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union drbd_state val)
{
        struct drbd_socket *sock;
        struct p_req_state *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->mask = cpu_to_be32(mask.i);
        p->val = cpu_to_be32(val.i);
        return drbd_send_command(device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}

int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
        enum drbd_packet cmd;
        struct drbd_socket *sock;
        struct p_req_state *p;

        cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
        sock = &connection->data;
        p = conn_prepare_command(connection, sock);
        if (!p)
                return -EIO;
        p->mask = cpu_to_be32(mask.i);
        p->val = cpu_to_be32(val.i);
        return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
{
        struct drbd_socket *sock;
        struct p_req_state_reply *p;

        sock = &first_peer_device(device)->connection->meta;
        p = drbd_prepare_command(device, sock);
        if (p) {
                p->retcode = cpu_to_be32(retcode);
                drbd_send_command(device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
        }
Lars Ellenbergf479ea02011-10-27 16:52:30 +02001038}
1039
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001040void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001041{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001042 struct drbd_socket *sock;
1043 struct p_req_state_reply *p;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001044 enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001045
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001046 sock = &connection->meta;
1047 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001048 if (p) {
1049 p->retcode = cpu_to_be32(retcode);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001050 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001051 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001052}
1053
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001054static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001055{
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001056 BUG_ON(code & ~0xf);
1057 p->encoding = (p->encoding & ~0xf) | code;
1058}
Philipp Reisnerb411b362009-09-25 16:07:19 -07001059
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001060static void dcbp_set_start(struct p_compressed_bm *p, int set)
1061{
1062 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1063}
Philipp Reisnerb411b362009-09-25 16:07:19 -07001064
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001065static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1066{
1067 BUG_ON(n & ~0x7);
1068 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001069}
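
/*
 * Layout of the single p->encoding byte manipulated by the helpers above,
 * as read off the bit masks (illustrative summary, not a protocol spec):
 *
 *   bit  7    : "start" flag - whether the first run length counts set bits
 *   bits 6..4 : number of pad bits at the end of the VLI bit stream
 *   bits 3..0 : bitmap encoding code (e.g. RLE_VLI_Bits)
 *
 * Note that dcbp_set_pad_bits(), as written, also clears the low nibble,
 * which is why the caller sets the code last.
 */
#if 0	/* example only, never compiled */
static void example_fill_encoding(struct p_compressed_bm *p)
{
	dcbp_set_start(p, 1);		/* first run describes set bits */
	dcbp_set_pad_bits(p, 3);	/* three unused trailing bits */
	dcbp_set_code(p, RLE_VLI_Bits);	/* low nibble, set last */
}
#endif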
1070
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001071static int fill_bitmap_rle_bits(struct drbd_device *device,
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001072 struct p_compressed_bm *p,
1073 unsigned int size,
1074 struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001075{
1076 struct bitstream bs;
1077 unsigned long plain_bits;
1078 unsigned long tmp;
1079 unsigned long rl;
1080 unsigned len;
1081 unsigned toggle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001082 int bits, use_rle;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001083
1084 /* may we use this feature? */
Philipp Reisner44ed1672011-04-19 17:10:19 +02001085 rcu_read_lock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001086 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001087 rcu_read_unlock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001088 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
Philipp Reisner44ed1672011-04-19 17:10:19 +02001089 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001090
1091 if (c->bit_offset >= c->bm_bits)
1092 return 0; /* nothing to do. */
1093
1094	/* use at most this many bytes */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001095 bitstream_init(&bs, p->code, size, 0);
1096 memset(p->code, 0, size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001097 /* plain bits covered in this code string */
1098 plain_bits = 0;
1099
1100 /* p->encoding & 0x80 stores whether the first run length is set.
1101 * bit offset is implicit.
1102 * start with toggle == 2 to be able to tell the first iteration */
1103 toggle = 2;
1104
1105	/* see how many plain bits we can stuff into one packet
1106 * using RLE and VLI. */
1107 do {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001108 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1109 : _drbd_bm_find_next(device, c->bit_offset);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001110 if (tmp == -1UL)
1111 tmp = c->bm_bits;
1112 rl = tmp - c->bit_offset;
1113
1114 if (toggle == 2) { /* first iteration */
1115 if (rl == 0) {
1116 /* the first checked bit was set,
1117 * store start value, */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001118 dcbp_set_start(p, 1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001119 /* but skip encoding of zero run length */
1120 toggle = !toggle;
1121 continue;
1122 }
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001123 dcbp_set_start(p, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001124 }
1125
1126 /* paranoia: catch zero runlength.
1127 * can only happen if bitmap is modified while we scan it. */
1128 if (rl == 0) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001129 drbd_err(device, "unexpected zero runlength while encoding bitmap "
Philipp Reisnerb411b362009-09-25 16:07:19 -07001130 "t:%u bo:%lu\n", toggle, c->bit_offset);
1131 return -1;
1132 }
1133
1134 bits = vli_encode_bits(&bs, rl);
1135 if (bits == -ENOBUFS) /* buffer full */
1136 break;
1137 if (bits <= 0) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001138 drbd_err(device, "error while encoding bitmap: %d\n", bits);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001139 return 0;
1140 }
1141
1142 toggle = !toggle;
1143 plain_bits += rl;
1144 c->bit_offset = tmp;
1145 } while (c->bit_offset < c->bm_bits);
1146
1147 len = bs.cur.b - p->code + !!bs.cur.bit;
1148
1149 if (plain_bits < (len << 3)) {
1150 /* incompressible with this method.
1151 * we need to rewind both word and bit position. */
1152 c->bit_offset -= plain_bits;
1153 bm_xfer_ctx_bit_to_word_offset(c);
1154 c->bit_offset = c->word_offset * BITS_PER_LONG;
1155 return 0;
1156 }
1157
1158 /* RLE + VLI was able to compress it just fine.
1159 * update c->word_offset. */
1160 bm_xfer_ctx_bit_to_word_offset(c);
1161
1162 /* store pad_bits */
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001163 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001164
1165 return len;
1166}
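
/*
 * Worked example (illustrative, simplified): if the region to transfer
 * starts with 1000 clear bits, followed by 3 set bits and then 500 clear
 * bits, the loop above records dcbp_set_start(p, 0) and VLI-encodes the
 * run lengths 1000, 3, 500 into p->code.  Had the region started with set
 * bits, the zero-length leading run would be skipped and dcbp_set_start(p, 1)
 * would note that instead.  The "plain_bits < (len << 3)" check then falls
 * back to the plain bitmap whenever the encoded bit stream would be larger
 * than the plain bits it covers.
 */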
1167
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001168/**
1169 * send_bitmap_rle_or_plain
1170 *
1171 * Return 0 when done, 1 when another iteration is needed, and a negative error
1172 * code upon failure.
1173 */
1174static int
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001175send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001176{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001177 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1178 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001179 struct p_compressed_bm *p = sock->sbuf + header_size;
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001180 int len, err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001181
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001182 len = fill_bitmap_rle_bits(device, p,
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001183 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001184 if (len < 0)
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001185 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001186
1187 if (len) {
Andreas Gruenbachera02d1242011-03-22 17:20:45 +01001188 dcbp_set_code(p, RLE_VLI_Bits);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001189 err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001190 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1191 NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001192 c->packets[0]++;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001193 c->bytes[0] += header_size + sizeof(*p) + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001194
1195 if (c->bit_offset >= c->bm_bits)
1196 len = 0; /* DONE */
1197 } else {
1198 /* was not compressible.
1199 * send a buffer full of plain text bits instead. */
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001200 unsigned int data_size;
1201 unsigned long num_words;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001202 unsigned long *p = sock->sbuf + header_size;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001203
1204 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001205 num_words = min_t(size_t, data_size / sizeof(*p),
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001206 c->bm_words - c->word_offset);
Andreas Gruenbachere6589832011-03-30 12:54:42 +02001207 len = num_words * sizeof(*p);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001208 if (len)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001209 drbd_bm_get_lel(device, c->word_offset, num_words, p);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001210 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001211 c->word_offset += num_words;
1212 c->bit_offset = c->word_offset * BITS_PER_LONG;
1213
1214 c->packets[1]++;
Andreas Gruenbacher50d0b1a2011-03-30 11:53:51 +02001215 c->bytes[1] += header_size + len;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001216
1217 if (c->bit_offset > c->bm_bits)
1218 c->bit_offset = c->bm_bits;
1219 }
Andreas Gruenbachera982dd52010-12-10 00:45:25 +01001220 if (!err) {
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001221 if (len == 0) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001222 INFO_bm_xfer_stats(device, "send", c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001223 return 0;
1224 } else
1225 return 1;
1226 }
1227 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001228}
1229
1230/* See the comment at receive_bitmap() */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001231static int _drbd_send_bitmap(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001232{
1233 struct bm_xfer_ctx c;
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001234 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001235
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001236 if (!expect(device->bitmap))
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001237 return false;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001238
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001239 if (get_ldev(device)) {
1240 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001241 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001242 drbd_bm_set_all(device);
1243 if (drbd_bm_write(device)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001244 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1245 * but otherwise process as per normal - need to tell other
1246 * side that a full resync is required! */
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001247 drbd_err(device, "Failed to write bitmap to disk!\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07001248 } else {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001249 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1250 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001251 }
1252 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001253 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001254 }
1255
1256 c = (struct bm_xfer_ctx) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001257 .bm_bits = drbd_bm_bits(device),
1258 .bm_words = drbd_bm_words(device),
Philipp Reisnerb411b362009-09-25 16:07:19 -07001259 };
1260
1261 do {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001262 err = send_bitmap_rle_or_plain(device, &c);
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001263 } while (err > 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001264
Andreas Gruenbacherf70af112010-12-11 18:51:50 +01001265 return err == 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001266}
1267
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001268int drbd_send_bitmap(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001269{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001270 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001271 int err = -1;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001272
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001273 mutex_lock(&sock->mutex);
1274 if (sock->socket)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001275 err = !_drbd_send_bitmap(device);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001276 mutex_unlock(&sock->mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001277 return err;
1278}
1279
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001280void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001281{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001282 struct drbd_socket *sock;
1283 struct p_barrier_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001284
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001285 if (connection->cstate < C_WF_REPORT_PARAMS)
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001286 return;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001287
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001288 sock = &connection->meta;
1289 p = conn_prepare_command(connection, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001290 if (!p)
1291 return;
1292 p->barrier = barrier_nr;
1293 p->set_size = cpu_to_be32(set_size);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001294 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001295}
1296
1297/**
1298 * _drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001299 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07001300 * @cmd: Packet command code.
1301 * @sector: sector, needs to be in big endian byte order
1302 * @blksize: size in byte, needs to be in big endian byte order
1303 * @block_id: Id, big endian byte order
1304 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001305static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001306 u64 sector, u32 blksize, u64 block_id)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001307{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001308 struct drbd_socket *sock;
1309 struct p_block_ack *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001310
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001311 if (device->state.conn < C_CONNECTED)
Andreas Gruenbachera8c32aa2011-03-16 01:27:22 +01001312 return -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001313
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001314 sock = &first_peer_device(device)->connection->meta;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001315 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001316 if (!p)
1317 return -EIO;
1318 p->sector = sector;
1319 p->block_id = block_id;
1320 p->blksize = blksize;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001321 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1322 return drbd_send_command(device, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001323}
1324
Lars Ellenberg2b2bf212010-10-06 11:46:55 +02001325/* dp->sector and dp->block_id already/still in network byte order,
1326 * data_size is payload size according to dp->head,
1327 * and may need to be corrected for digest size. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001328void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001329 struct p_data *dp, int data_size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001330{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001331 if (first_peer_device(device)->connection->peer_integrity_tfm)
1332 data_size -= crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001333 _drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size),
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001334 dp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001335}
1336
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001337void drbd_send_ack_rp(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbachera9a99942011-03-16 01:30:14 +01001338 struct p_block_req *rp)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001339{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001340 _drbd_send_ack(device, cmd, rp->sector, rp->blksize, rp->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001341}
1342
1343/**
1344 * drbd_send_ack() - Sends an ack packet
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001345 * @device: DRBD device
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001346 * @cmd: packet command code
1347 * @peer_req: peer request
Philipp Reisnerb411b362009-09-25 16:07:19 -07001348 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001349int drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001350 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001351{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001352 return _drbd_send_ack(device, cmd,
Andreas Gruenbacherdd516122011-03-16 15:39:08 +01001353 cpu_to_be64(peer_req->i.sector),
1354 cpu_to_be32(peer_req->i.size),
1355 peer_req->block_id);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001356}
1357
1358/* This function misuses the block_id field to signal if the blocks
1359 * are in sync or not. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001360int drbd_send_ack_ex(struct drbd_device *device, enum drbd_packet cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001361 sector_t sector, int blksize, u64 block_id)
1362{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001363 return _drbd_send_ack(device, cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001364 cpu_to_be64(sector),
1365 cpu_to_be32(blksize),
1366 cpu_to_be64(block_id));
1367}
1368
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001369int drbd_send_drequest(struct drbd_device *device, int cmd,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001370 sector_t sector, int size, u64 block_id)
1371{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001372 struct drbd_socket *sock;
1373 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001374
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001375 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001376 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001377 if (!p)
1378 return -EIO;
1379 p->sector = cpu_to_be64(sector);
1380 p->block_id = block_id;
1381 p->blksize = cpu_to_be32(size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001382 return drbd_send_command(device, sock, cmd, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001383}
1384
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001385int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector, int size,
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01001386 void *digest, int digest_size, enum drbd_packet cmd)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001387{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001388 struct drbd_socket *sock;
1389 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001390
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001391 /* FIXME: Put the digest into the preallocated socket buffer. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001392
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001393 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001394 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001395 if (!p)
1396 return -EIO;
1397 p->sector = cpu_to_be64(sector);
1398 p->block_id = ID_SYNCER /* unused */;
1399 p->blksize = cpu_to_be32(size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001400 return drbd_send_command(device, sock, cmd, sizeof(*p),
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001401 digest, digest_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001402}
1403
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001404int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001405{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001406 struct drbd_socket *sock;
1407 struct p_block_req *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001408
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001409 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001410 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001411 if (!p)
1412 return -EIO;
1413 p->sector = cpu_to_be64(sector);
1414 p->block_id = ID_SYNCER /* unused */;
1415 p->blksize = cpu_to_be32(size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001416 return drbd_send_command(device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001417}
1418
1419/* called on sndtimeo
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001420 * returns false if we should retry,
1421 * true if we think the connection is dead
Philipp Reisnerb411b362009-09-25 16:07:19 -07001422 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001423static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001424{
1425 int drop_it;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001426 /* long elapsed = (long)(jiffies - device->last_received); */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001427
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001428 drop_it = connection->meta.socket == sock
1429 || !connection->asender.task
1430 || get_t_state(&connection->asender) != RUNNING
1431 || connection->cstate < C_WF_REPORT_PARAMS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001432
1433 if (drop_it)
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01001434 return true;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001435
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001436 drop_it = !--connection->ko_count;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001437 if (!drop_it) {
Andreas Gruenbacher1ec861e2011-07-06 11:01:44 +02001438 drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001439 current->comm, current->pid, connection->ko_count);
1440 request_ping(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001441 }
1442
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001443	return drop_it; /* && (device->state == R_PRIMARY) */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001444}
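
/*
 * Worked example (illustrative; the numbers are assumptions, not defaults
 * taken from this file): with net_conf->ko_count refreshed to 7 in
 * drbd_send() and a socket send timeout of 6 seconds, a peer that stops
 * draining the data socket is granted 7 timeout periods - roughly 42
 * seconds - before we_should_drop_the_connection() returns true; each
 * expired period before the last one merely triggers request_ping().
 */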
1445
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001446static void drbd_update_congested(struct drbd_connection *connection)
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001447{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001448 struct sock *sk = connection->data.socket->sk;
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001449 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001450 set_bit(NET_CONGESTED, &connection->flags);
Andreas Gruenbacher9e204cd2011-01-26 18:45:11 +01001451}
1452
Philipp Reisnerb411b362009-09-25 16:07:19 -07001453/* The idea of sendpage seems to be to put some kind of reference
1454 * to the page into the skb, and to hand it over to the NIC. In
1455 * this process get_page() gets called.
1456 *
1457 * As soon as the page was really sent over the network put_page()
1458 * gets called by some part of the network layer. [ NIC driver? ]
1459 *
1460 * [ get_page() / put_page() increment/decrement the count. If count
1461 * reaches 0 the page will be freed. ]
1462 *
1463 * This works nicely with pages from FSs.
1464 * But this means that in protocol A we might signal IO completion too early!
1465 *
1466 * In order not to corrupt data during a resync we must make sure
1467 * that we do not reuse our own buffer pages (EEs) too early, therefore
1468 * we have the net_ee list.
1469 *
1470 * XFS still seems to have problems: it submits pages with page_count == 0!
1471 * As a workaround, we disable sendpage on pages
1472 * with page_count == 0 or PageSlab.
1473 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001474static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001475 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001476{
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001477 struct socket *socket;
1478 void *addr;
1479 int err;
1480
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001481 socket = first_peer_device(device)->connection->data.socket;
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001482 addr = kmap(page) + offset;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001483 err = drbd_send_all(first_peer_device(device)->connection, socket, addr, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001484 kunmap(page);
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001485 if (!err)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001486 device->send_cnt += size >> 9;
Andreas Gruenbacherb9874272011-03-16 09:41:10 +01001487 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001488}
1489
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001490static int _drbd_send_page(struct drbd_device *device, struct page *page,
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001491 int offset, size_t size, unsigned msg_flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001492{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001493 struct socket *socket = first_peer_device(device)->connection->data.socket;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001494 mm_segment_t oldfs = get_fs();
Philipp Reisnerb411b362009-09-25 16:07:19 -07001495 int len = size;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001496 int err = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001497
1498 /* e.g. XFS meta- & log-data is in slab pages, which have a
1499 * page_count of 0 and/or have PageSlab() set.
1500 * we cannot use send_page for those, as that does get_page();
1501 * put_page(); and would cause either a VM_BUG directly, or
1502 * __page_cache_release a page that would actually still be referenced
1503 * by someone, leading to some obscure delayed Oops somewhere else. */
1504 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001505 return _drbd_no_send_page(device, page, offset, size, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001506
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001507 msg_flags |= MSG_NOSIGNAL;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001508 drbd_update_congested(first_peer_device(device)->connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001509 set_fs(KERNEL_DS);
1510 do {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001511 int sent;
1512
1513 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001514 if (sent <= 0) {
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001515 if (sent == -EAGAIN) {
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001516 if (we_should_drop_the_connection(first_peer_device(device)->connection, socket))
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001517 break;
1518 continue;
1519 }
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001520 drbd_warn(device, "%s: size=%d len=%d sent=%d\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07001521 __func__, (int)size, len, sent);
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001522 if (sent < 0)
1523 err = sent;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001524 break;
1525 }
1526 len -= sent;
1527 offset += sent;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001528 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001529 set_fs(oldfs);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001530 clear_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001531
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001532 if (len == 0) {
1533 err = 0;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001534 device->send_cnt += size >> 9;
Andreas Gruenbacher88b390f2011-03-16 10:44:16 +01001535 }
1536 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001537}
1538
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001539static int _drbd_send_bio(struct drbd_device *device, struct bio *bio)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001540{
Kent Overstreet79886132013-11-23 17:19:00 -08001541 struct bio_vec bvec;
1542 struct bvec_iter iter;
1543
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001544 /* hint all but last page with MSG_MORE */
Kent Overstreet79886132013-11-23 17:19:00 -08001545 bio_for_each_segment(bvec, bio, iter) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001546 int err;
1547
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001548 err = _drbd_no_send_page(device, bvec.bv_page,
Kent Overstreet79886132013-11-23 17:19:00 -08001549 bvec.bv_offset, bvec.bv_len,
Kent Overstreet4550dd62013-08-07 14:26:21 -07001550 bio_iter_last(bvec, iter)
Kent Overstreet79886132013-11-23 17:19:00 -08001551 ? 0 : MSG_MORE);
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001552 if (err)
1553 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001554 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001555 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001556}
1557
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001558static int _drbd_send_zc_bio(struct drbd_device *device, struct bio *bio)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001559{
Kent Overstreet79886132013-11-23 17:19:00 -08001560 struct bio_vec bvec;
1561 struct bvec_iter iter;
1562
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001563 /* hint all but last page with MSG_MORE */
Kent Overstreet79886132013-11-23 17:19:00 -08001564 bio_for_each_segment(bvec, bio, iter) {
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001565 int err;
1566
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001567 err = _drbd_send_page(device, bvec.bv_page,
Kent Overstreet79886132013-11-23 17:19:00 -08001568 bvec.bv_offset, bvec.bv_len,
Kent Overstreet4550dd62013-08-07 14:26:21 -07001569 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001570 if (err)
1571 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001572 }
Andreas Gruenbacher7fae55d2011-03-16 11:46:33 +01001573 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001574}
1575
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001576static int _drbd_send_zc_ee(struct drbd_device *device,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001577 struct drbd_peer_request *peer_req)
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001578{
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001579 struct page *page = peer_req->pages;
1580 unsigned len = peer_req->i.size;
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001581 int err;
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001582
Lars Ellenbergba11ad92010-05-25 16:26:16 +02001583 /* hint all but last page with MSG_MORE */
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001584 page_chain_for_each(page) {
1585 unsigned l = min_t(unsigned, len, PAGE_SIZE);
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001586
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001587 err = _drbd_send_page(device, page, 0, l,
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001588 page_chain_next(page) ? MSG_MORE : 0);
1589 if (err)
1590 return err;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001591 len -= l;
1592 }
Andreas Gruenbacher9f692302011-03-16 10:49:09 +01001593 return 0;
Lars Ellenberg45bb9122010-05-14 17:10:48 +02001594}
1595
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001596static u32 bio_flags_to_wire(struct drbd_device *device, unsigned long bi_rw)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001597{
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001598 if (first_peer_device(device)->connection->agreed_pro_version >= 95)
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001599 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001600 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1601 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1602 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1603 else
Jens Axboe721a9602011-03-09 11:56:30 +01001604 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
Philipp Reisner76d2e7e2010-08-25 11:58:05 +02001605}
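
/*
 * Illustrative mapping (follows directly from bio_flags_to_wire() above):
 * towards a peer with agreed_pro_version >= 95, a request marked REQ_FUA |
 * REQ_FLUSH is announced as DP_FUA | DP_FLUSH on the wire; towards older
 * peers only DP_RW_SYNC (from REQ_SYNC) can be expressed, so FUA, FLUSH
 * and DISCARD hints are dropped.
 */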
1606
Philipp Reisnerb411b362009-09-25 16:07:19 -07001607/* Used to send write requests
1608 * R_PRIMARY -> Peer (P_DATA)
1609 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001610int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001611{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001612 struct drbd_socket *sock;
1613 struct p_data *p;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001614 unsigned int dp_flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001615 int dgs;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001616 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001617
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001618 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001619 p = drbd_prepare_command(device, sock);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001620 dgs = first_peer_device(device)->connection->integrity_tfm ?
1621 crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001622
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001623 if (!p)
1624 return -EIO;
1625 p->sector = cpu_to_be64(req->i.sector);
1626 p->block_id = (unsigned long)req;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001627 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1628 dp_flags = bio_flags_to_wire(device, req->master_bio->bi_rw);
1629 if (device->state.conn >= C_SYNC_SOURCE &&
1630 device->state.conn <= C_PAUSED_SYNC_T)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001631 dp_flags |= DP_MAY_SET_IN_SYNC;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001632 if (first_peer_device(device)->connection->agreed_pro_version >= 100) {
Philipp Reisner303d1442011-04-13 16:24:47 -07001633 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1634 dp_flags |= DP_SEND_RECEIVE_ACK;
1635 if (req->rq_state & RQ_EXP_WRITE_ACK)
1636 dp_flags |= DP_SEND_WRITE_ACK;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001637 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001638 p->dp_flags = cpu_to_be32(dp_flags);
1639 if (dgs)
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001640 drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, p + 1);
1641 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001642 if (!err) {
Lars Ellenberg470be442010-11-10 10:36:52 +01001643 /* For protocol A, we have to memcpy the payload into
1644 * socket buffers, as we may complete right away
1645 * as soon as we handed it over to tcp, at which point the data
1646 * pages may become invalid.
1647 *
1648		 * If data integrity is enabled, we copy it as well, so we can be
1649		 * sure that even if the bio pages are still being modified, this
1650		 * won't change the data on the wire; thus, if the digest checks
1651		 * out ok after sending on this side but does not match on the
1652		 * receiving side, we have certainly detected corruption elsewhere.
1653 */
Philipp Reisner303d1442011-04-13 16:24:47 -07001654 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001655 err = _drbd_send_bio(device, req->master_bio);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001656 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001657 err = _drbd_send_zc_bio(device, req->master_bio);
Lars Ellenberg470be442010-11-10 10:36:52 +01001658
1659 /* double check digest, sometimes buffers have been modified in flight. */
1660 if (dgs > 0 && dgs <= 64) {
Bart Van Assche24c48302011-05-21 18:32:29 +02001661 /* 64 byte, 512 bit, is the largest digest size
Lars Ellenberg470be442010-11-10 10:36:52 +01001662 * currently supported in kernel crypto. */
1663 unsigned char digest[64];
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001664 drbd_csum_bio(device, first_peer_device(device)->connection->integrity_tfm, req->master_bio, digest);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001665 if (memcmp(p + 1, digest, dgs)) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001666 drbd_warn(device,
Lars Ellenberg470be442010-11-10 10:36:52 +01001667 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
Andreas Gruenbacherace652a2011-01-03 17:09:58 +01001668 (unsigned long long)req->i.sector, req->i.size);
Lars Ellenberg470be442010-11-10 10:36:52 +01001669 }
1670 } /* else if (dgs > 64) {
1671 ... Be noisy about digest too large ...
1672 } */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001673 }
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001674 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001675
Andreas Gruenbacher6bdb9b02011-03-16 11:52:58 +01001676 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001677}
1678
1679/* answer packet, used to send data back for read requests:
1680 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1681 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1682 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001683int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
Andreas Gruenbacherdb830c42011-02-04 15:57:48 +01001684 struct drbd_peer_request *peer_req)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001685{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001686 struct drbd_socket *sock;
1687 struct p_data *p;
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001688 int err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001689 int dgs;
1690
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001691 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001692 p = drbd_prepare_command(device, sock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001693
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001694 dgs = first_peer_device(device)->connection->integrity_tfm ?
1695 crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001696
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001697 if (!p)
1698 return -EIO;
1699 p->sector = cpu_to_be64(peer_req->i.sector);
1700 p->block_id = peer_req->block_id;
1701 p->seq_num = 0; /* unused */
Lars Ellenbergb17f33c2012-02-08 15:32:51 +01001702 p->dp_flags = 0;
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001703 if (dgs)
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001704 drbd_csum_ee(device, first_peer_device(device)->connection->integrity_tfm, peer_req, p + 1);
1705 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001706 if (!err)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001707 err = _drbd_send_zc_ee(device, peer_req);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001708 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
Philipp Reisnerb411b362009-09-25 16:07:19 -07001709
Andreas Gruenbacher7b57b89d2011-03-16 11:35:20 +01001710 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001711}
1712
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001713int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
Philipp Reisner73a01a12010-10-27 14:33:00 +02001714{
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001715 struct drbd_socket *sock;
1716 struct p_block_desc *p;
Philipp Reisner73a01a12010-10-27 14:33:00 +02001717
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001718 sock = &first_peer_device(device)->connection->data;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001719 p = drbd_prepare_command(device, sock);
Andreas Gruenbacher9f5bdc32011-03-28 14:23:08 +02001720 if (!p)
1721 return -EIO;
1722 p->sector = cpu_to_be64(req->i.sector);
1723 p->blksize = cpu_to_be32(req->i.size);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001724 return drbd_send_command(device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
Philipp Reisner73a01a12010-10-27 14:33:00 +02001725}
1726
Philipp Reisnerb411b362009-09-25 16:07:19 -07001727/*
1728 drbd_send distinguishes two cases:
1729
1730 Packets sent via the data socket "sock"
1731 and packets sent via the meta data socket "msock"
1732
1733 sock msock
1734 -----------------+-------------------------+------------------------------
1735 timeout conf.timeout / 2 conf.timeout / 2
1736 timeout action send a ping via msock Abort communication
1737 and close all sockets
1738*/
1739
1740/*
1741 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1742 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001743int drbd_send(struct drbd_connection *connection, struct socket *sock,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001744 void *buf, size_t size, unsigned msg_flags)
1745{
1746 struct kvec iov;
1747 struct msghdr msg;
1748 int rv, sent = 0;
1749
1750 if (!sock)
Andreas Gruenbacherc0d42c82010-12-09 23:52:22 +01001751 return -EBADR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001752
1753 /* THINK if (signal_pending) return ... ? */
1754
1755 iov.iov_base = buf;
1756 iov.iov_len = size;
1757
1758 msg.msg_name = NULL;
1759 msg.msg_namelen = 0;
1760 msg.msg_control = NULL;
1761 msg.msg_controllen = 0;
1762 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1763
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001764 if (sock == connection->data.socket) {
Philipp Reisner44ed1672011-04-19 17:10:19 +02001765 rcu_read_lock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001766 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
Philipp Reisner44ed1672011-04-19 17:10:19 +02001767 rcu_read_unlock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001768 drbd_update_congested(connection);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001769 }
1770 do {
1771 /* STRANGE
1772 * tcp_sendmsg does _not_ use its size parameter at all ?
1773 *
1774 * -EAGAIN on timeout, -EINTR on signal.
1775 */
1776/* THINK
1777 * do we need to block DRBD_SIG if sock == &meta.socket ??
1778 * otherwise wake_asender() might interrupt some send_*Ack !
1779 */
1780 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1781 if (rv == -EAGAIN) {
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001782 if (we_should_drop_the_connection(connection, sock))
Philipp Reisnerb411b362009-09-25 16:07:19 -07001783 break;
1784 else
1785 continue;
1786 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07001787 if (rv == -EINTR) {
1788 flush_signals(current);
1789 rv = 0;
1790 }
1791 if (rv < 0)
1792 break;
1793 sent += rv;
1794 iov.iov_base += rv;
1795 iov.iov_len -= rv;
1796 } while (sent < size);
1797
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001798 if (sock == connection->data.socket)
1799 clear_bit(NET_CONGESTED, &connection->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001800
1801 if (rv <= 0) {
1802 if (rv != -EAGAIN) {
Andreas Gruenbacher1ec861e2011-07-06 11:01:44 +02001803 drbd_err(connection, "%s_sendmsg returned %d\n",
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001804 sock == connection->meta.socket ? "msock" : "sock",
Philipp Reisnerbedbd2a2011-02-07 15:08:48 +01001805 rv);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001806 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001807 } else
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001808 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001809 }
1810
1811 return sent;
1812}
1813
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001814/**
1815 * drbd_send_all - Send an entire buffer
1816 *
1817 * Returns 0 upon success and a negative error value otherwise.
1818 */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001819int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001820 size_t size, unsigned msg_flags)
1821{
1822 int err;
1823
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02001824 err = drbd_send(connection, sock, buffer, size, msg_flags);
Andreas Gruenbacherfb708e42010-12-15 17:04:36 +01001825 if (err < 0)
1826 return err;
1827 if (err != size)
1828 return -EIO;
1829 return 0;
1830}
1831
Philipp Reisnerb411b362009-09-25 16:07:19 -07001832static int drbd_open(struct block_device *bdev, fmode_t mode)
1833{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001834 struct drbd_device *device = bdev->bd_disk->private_data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001835 unsigned long flags;
1836 int rv = 0;
1837
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001838 mutex_lock(&drbd_main_mutex);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001839 spin_lock_irqsave(&device->resource->req_lock, flags);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001840 /* to have a stable device->state.role
Philipp Reisnerb411b362009-09-25 16:07:19 -07001841 * and no race with updating open_cnt */
1842
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001843 if (device->state.role != R_PRIMARY) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001844 if (mode & FMODE_WRITE)
1845 rv = -EROFS;
1846 else if (!allow_oos)
1847 rv = -EMEDIUMTYPE;
1848 }
1849
1850 if (!rv)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001851 device->open_cnt++;
Andreas Gruenbacher05008132011-07-07 14:19:42 +02001852 spin_unlock_irqrestore(&device->resource->req_lock, flags);
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001853 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001854
1855 return rv;
1856}
1857
Al Virodb2a1442013-05-05 21:52:57 -04001858static void drbd_release(struct gendisk *gd, fmode_t mode)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001859{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001860 struct drbd_device *device = gd->private_data;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001861 mutex_lock(&drbd_main_mutex);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001862 device->open_cnt--;
Arnd Bergmann2a48fc02010-06-02 14:28:52 +02001863 mutex_unlock(&drbd_main_mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001864}
1865
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001866static void drbd_set_defaults(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001867{
Lars Ellenbergf3990022011-03-23 14:31:09 +01001868 /* Beware! The actual layout differs
1869 * between big endian and little endian */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001870 device->state = (union drbd_dev_state) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001871 { .role = R_SECONDARY,
1872 .peer = R_UNKNOWN,
1873 .conn = C_STANDALONE,
1874 .disk = D_DISKLESS,
1875 .pdsk = D_UNKNOWN,
Philipp Reisnerb411b362009-09-25 16:07:19 -07001876 } };
1877}
1878
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001879void drbd_init_set_defaults(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001880{
1881 /* the memset(,0,) did most of this.
1882 * note: only assignments, no allocation in here */
1883
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001884 drbd_set_defaults(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001885
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001886 atomic_set(&device->ap_bio_cnt, 0);
1887 atomic_set(&device->ap_pending_cnt, 0);
1888 atomic_set(&device->rs_pending_cnt, 0);
1889 atomic_set(&device->unacked_cnt, 0);
1890 atomic_set(&device->local_cnt, 0);
1891 atomic_set(&device->pp_in_use_by_net, 0);
1892 atomic_set(&device->rs_sect_in, 0);
1893 atomic_set(&device->rs_sect_ev, 0);
1894 atomic_set(&device->ap_in_flight, 0);
1895 atomic_set(&device->md_io_in_use, 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001896
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001897 mutex_init(&device->own_state_mutex);
1898 device->state_mutex = &device->own_state_mutex;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001899
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001900 spin_lock_init(&device->al_lock);
1901 spin_lock_init(&device->peer_seq_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001902
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001903 INIT_LIST_HEAD(&device->active_ee);
1904 INIT_LIST_HEAD(&device->sync_ee);
1905 INIT_LIST_HEAD(&device->done_ee);
1906 INIT_LIST_HEAD(&device->read_ee);
1907 INIT_LIST_HEAD(&device->net_ee);
1908 INIT_LIST_HEAD(&device->resync_reads);
1909 INIT_LIST_HEAD(&device->resync_work.list);
1910 INIT_LIST_HEAD(&device->unplug_work.list);
1911 INIT_LIST_HEAD(&device->go_diskless.list);
1912 INIT_LIST_HEAD(&device->md_sync_work.list);
1913 INIT_LIST_HEAD(&device->start_resync_work.list);
1914 INIT_LIST_HEAD(&device->bm_io_work.w.list);
Philipp Reisner0ced55a2010-04-30 15:26:20 +02001915
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001916 device->resync_work.cb = w_resync_timer;
1917 device->unplug_work.cb = w_send_write_hint;
1918 device->go_diskless.cb = w_go_diskless;
1919 device->md_sync_work.cb = w_md_sync;
1920 device->bm_io_work.w.cb = w_bitmap_io;
1921 device->start_resync_work.cb = w_start_resync;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001922
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001923 device->resync_work.device = device;
1924 device->unplug_work.device = device;
1925 device->go_diskless.device = device;
1926 device->md_sync_work.device = device;
1927 device->bm_io_work.w.device = device;
1928 device->start_resync_work.device = device;
Philipp Reisnera21e9292011-02-08 15:08:49 +01001929
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001930 init_timer(&device->resync_timer);
1931 init_timer(&device->md_sync_timer);
1932 init_timer(&device->start_resync_timer);
1933 init_timer(&device->request_timer);
1934 device->resync_timer.function = resync_timer_fn;
1935 device->resync_timer.data = (unsigned long) device;
1936 device->md_sync_timer.function = md_sync_timer_fn;
1937 device->md_sync_timer.data = (unsigned long) device;
1938 device->start_resync_timer.function = start_resync_timer_fn;
1939 device->start_resync_timer.data = (unsigned long) device;
1940 device->request_timer.function = request_timer_fn;
1941 device->request_timer.data = (unsigned long) device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001942
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001943 init_waitqueue_head(&device->misc_wait);
1944 init_waitqueue_head(&device->state_wait);
1945 init_waitqueue_head(&device->ee_wait);
1946 init_waitqueue_head(&device->al_wait);
1947 init_waitqueue_head(&device->seq_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001948
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001949 device->resync_wenr = LC_FREE;
1950 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1951 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001952}
1953
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001954void drbd_device_cleanup(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07001955{
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001956 int i;
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001957 if (first_peer_device(device)->connection->receiver.t_state != NONE)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02001958 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02001959 first_peer_device(device)->connection->receiver.t_state);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001960
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001961 device->al_writ_cnt =
1962 device->bm_writ_cnt =
1963 device->read_cnt =
1964 device->recv_cnt =
1965 device->send_cnt =
1966 device->writ_cnt =
1967 device->p_size =
1968 device->rs_start =
1969 device->rs_total =
1970 device->rs_failed = 0;
1971 device->rs_last_events = 0;
1972 device->rs_last_sect_ev = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001973 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001974 device->rs_mark_left[i] = 0;
1975 device->rs_mark_time[i] = 0;
Lars Ellenberg1d7734a2010-08-11 21:21:50 +02001976 }
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02001977 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001978
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001979 drbd_set_my_capacity(device, 0);
1980 if (device->bitmap) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07001981 /* maybe never allocated. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001982 drbd_bm_resize(device, 0, 1);
1983 drbd_bm_cleanup(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001984 }
1985
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001986 drbd_free_bc(device->ldev);
1987 device->ldev = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07001988
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02001989 clear_bit(AL_SUSPENDED, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07001990
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02001991 D_ASSERT(device, list_empty(&device->active_ee));
1992 D_ASSERT(device, list_empty(&device->sync_ee));
1993 D_ASSERT(device, list_empty(&device->done_ee));
1994 D_ASSERT(device, list_empty(&device->read_ee));
1995 D_ASSERT(device, list_empty(&device->net_ee));
1996 D_ASSERT(device, list_empty(&device->resync_reads));
1997 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
1998 D_ASSERT(device, list_empty(&device->resync_work.list));
1999 D_ASSERT(device, list_empty(&device->unplug_work.list));
2000 D_ASSERT(device, list_empty(&device->go_diskless.list));
Lars Ellenberg2265b472010-12-16 15:41:26 +01002001
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002002 drbd_set_defaults(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002003}
2004
2005
2006static void drbd_destroy_mempools(void)
2007{
2008 struct page *page;
2009
2010 while (drbd_pp_pool) {
2011 page = drbd_pp_pool;
2012 drbd_pp_pool = (struct page *)page_private(page);
2013 __free_page(page);
2014 drbd_pp_vacant--;
2015 }
2016
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02002017 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002018
Lars Ellenberg9476f392011-02-23 17:02:01 +01002019 if (drbd_md_io_bio_set)
2020 bioset_free(drbd_md_io_bio_set);
Lars Ellenberg42818082011-02-23 12:39:46 +01002021 if (drbd_md_io_page_pool)
2022 mempool_destroy(drbd_md_io_page_pool);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002023 if (drbd_ee_mempool)
2024 mempool_destroy(drbd_ee_mempool);
2025 if (drbd_request_mempool)
2026 mempool_destroy(drbd_request_mempool);
2027 if (drbd_ee_cache)
2028 kmem_cache_destroy(drbd_ee_cache);
2029 if (drbd_request_cache)
2030 kmem_cache_destroy(drbd_request_cache);
2031 if (drbd_bm_ext_cache)
2032 kmem_cache_destroy(drbd_bm_ext_cache);
2033 if (drbd_al_ext_cache)
2034 kmem_cache_destroy(drbd_al_ext_cache);
2035
Lars Ellenberg9476f392011-02-23 17:02:01 +01002036 drbd_md_io_bio_set = NULL;
Lars Ellenberg42818082011-02-23 12:39:46 +01002037 drbd_md_io_page_pool = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002038 drbd_ee_mempool = NULL;
2039 drbd_request_mempool = NULL;
2040 drbd_ee_cache = NULL;
2041 drbd_request_cache = NULL;
2042 drbd_bm_ext_cache = NULL;
2043 drbd_al_ext_cache = NULL;
2044
2045 return;
2046}
2047
2048static int drbd_create_mempools(void)
2049{
2050 struct page *page;
Lars Ellenberg1816a2b2010-11-11 15:19:07 +01002051 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
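	/* e.g. with a DRBD_MAX_BIO_SIZE of 1 MiB (assumed here for illustration)
	 * and 4 KiB pages, this reserves 256 pages per allowed minor. */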
Philipp Reisnerb411b362009-09-25 16:07:19 -07002052 int i;
2053
2054 /* prepare our caches and mempools */
2055 drbd_request_mempool = NULL;
2056 drbd_ee_cache = NULL;
2057 drbd_request_cache = NULL;
2058 drbd_bm_ext_cache = NULL;
2059 drbd_al_ext_cache = NULL;
2060 drbd_pp_pool = NULL;
Lars Ellenberg42818082011-02-23 12:39:46 +01002061 drbd_md_io_page_pool = NULL;
Lars Ellenberg9476f392011-02-23 17:02:01 +01002062 drbd_md_io_bio_set = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002063
2064 /* caches */
2065 drbd_request_cache = kmem_cache_create(
2066 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2067 if (drbd_request_cache == NULL)
2068 goto Enomem;
2069
2070 drbd_ee_cache = kmem_cache_create(
Andreas Gruenbacherf6ffca92011-02-04 15:30:34 +01002071 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002072 if (drbd_ee_cache == NULL)
2073 goto Enomem;
2074
2075 drbd_bm_ext_cache = kmem_cache_create(
2076 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2077 if (drbd_bm_ext_cache == NULL)
2078 goto Enomem;
2079
2080 drbd_al_ext_cache = kmem_cache_create(
2081 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2082 if (drbd_al_ext_cache == NULL)
2083 goto Enomem;
2084
2085 /* mempools */
Lars Ellenberg9476f392011-02-23 17:02:01 +01002086 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2087 if (drbd_md_io_bio_set == NULL)
2088 goto Enomem;
Lars Ellenberg9476f392011-02-23 17:02:01 +01002089
Lars Ellenberg42818082011-02-23 12:39:46 +01002090 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2091 if (drbd_md_io_page_pool == NULL)
2092 goto Enomem;
2093
Philipp Reisnerb411b362009-09-25 16:07:19 -07002094 drbd_request_mempool = mempool_create(number,
2095 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2096 if (drbd_request_mempool == NULL)
2097 goto Enomem;
2098
2099 drbd_ee_mempool = mempool_create(number,
2100 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
Nicolas Kaiser2027ae12010-10-28 06:15:26 -06002101 if (drbd_ee_mempool == NULL)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002102 goto Enomem;
2103
2104 /* drbd's page pool */
2105 spin_lock_init(&drbd_pp_lock);
2106
2107 for (i = 0; i < number; i++) {
2108 page = alloc_page(GFP_HIGHUSER);
2109 if (!page)
2110 goto Enomem;
2111 set_page_private(page, (unsigned long)drbd_pp_pool);
2112 drbd_pp_pool = page;
2113 }
2114 drbd_pp_vacant = number;
2115
2116 return 0;
2117
2118Enomem:
2119 drbd_destroy_mempools(); /* in case we allocated some */
2120 return -ENOMEM;
2121}
2122
2123static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2124 void *unused)
2125{
2126 /* just so we have it. you never know what interesting things we
2127 * might want to do here some day...
2128 */
2129
2130 return NOTIFY_DONE;
2131}
2132
2133static struct notifier_block drbd_notifier = {
2134 .notifier_call = drbd_notify_sys,
2135};
2136
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002137static void drbd_release_all_peer_reqs(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002138{
2139 int rr;
2140
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002141 rr = drbd_free_peer_reqs(device, &device->active_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002142 if (rr)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02002143 drbd_err(device, "%d EEs in active list found!\n", rr);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002144
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002145 rr = drbd_free_peer_reqs(device, &device->sync_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002146 if (rr)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02002147 drbd_err(device, "%d EEs in sync list found!\n", rr);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002148
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002149 rr = drbd_free_peer_reqs(device, &device->read_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002150 if (rr)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02002151 drbd_err(device, "%d EEs in read list found!\n", rr);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002152
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002153 rr = drbd_free_peer_reqs(device, &device->done_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002154 if (rr)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02002155 drbd_err(device, "%d EEs in done list found!\n", rr);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002156
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002157 rr = drbd_free_peer_reqs(device, &device->net_ee);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002158 if (rr)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02002159 drbd_err(device, "%d EEs in net list found!\n", rr);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002160}
2161
Philipp Reisner774b3052011-02-22 02:07:03 -05002162/* caution. no locking. */
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002163void drbd_destroy_device(struct kref *kref)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002164{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002165 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002166 struct drbd_resource *resource = device->resource;
2167 struct drbd_connection *connection;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002168
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002169 del_timer_sync(&device->request_timer);
Philipp Reisnerdfa8bed2011-06-29 14:06:08 +02002170
Philipp Reisnerb411b362009-09-25 16:07:19 -07002171 /* paranoia asserts */
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02002172 D_ASSERT(device, device->open_cnt == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002173 /* end paranoia asserts */
2174
Philipp Reisnerb411b362009-09-25 16:07:19 -07002175 /* cleanup stuff that may have been allocated during
2176 * device (re-)configuration or state changes */
2177
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002178 if (device->this_bdev)
2179 bdput(device->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002180
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002181 drbd_free_bc(device->ldev);
2182 device->ldev = NULL;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002183
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002184 drbd_release_all_peer_reqs(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002185
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002186 lc_destroy(device->act_log);
2187 lc_destroy(device->resync);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002188
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002189 kfree(device->p_uuid);
2190 /* device->p_uuid = NULL; */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002191
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002192 if (device->bitmap) /* should no longer be there. */
2193 drbd_bm_cleanup(device);
2194 __free_page(device->md_io_page);
2195 put_disk(device->vdisk);
2196 blk_cleanup_queue(device->rq_queue);
2197 kfree(device->rs_plan_s);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002198 kfree(first_peer_device(device));
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002199 kfree(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002200
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002201 for_each_connection(connection, resource)
2202 kref_put(&connection->kref, drbd_destroy_connection);
2203 kref_put(&resource->kref, drbd_destroy_resource);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002204}
2205
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002206/* One global retry thread, if we need to push back some bio and have it
2207 * reinserted through our make request function.
2208 */
2209static struct retry_worker {
2210 struct workqueue_struct *wq;
2211 struct work_struct worker;
2212
2213 spinlock_t lock;
2214 struct list_head writes;
2215} retry;
2216
2217static void do_retry(struct work_struct *ws)
2218{
2219 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2220 LIST_HEAD(writes);
2221 struct drbd_request *req, *tmp;
2222
2223 spin_lock_irq(&retry->lock);
2224 list_splice_init(&retry->writes, &writes);
2225 spin_unlock_irq(&retry->lock);
2226
2227 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002228 struct drbd_device *device = req->w.device;
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002229 struct bio *bio = req->master_bio;
2230 unsigned long start_time = req->start_time;
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002231 bool expected;
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002232
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002233 expected =
2234 expect(atomic_read(&req->completion_ref) == 0) &&
2235 expect(req->rq_state & RQ_POSTPONED) &&
2236 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2237 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2238
2239 if (!expected)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02002240 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002241 req, atomic_read(&req->completion_ref),
2242 req->rq_state);
2243
2244 /* We still need to put one kref associated with the
2245 * "completion_ref" going zero in the code path that queued it
2246 * here. The request object may still be referenced by a
2247 * frozen local req->private_bio, in case we force-detached.
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002248 */
Lars Ellenberg9a278a72012-07-24 10:12:36 +02002249 kref_put(&req->kref, drbd_req_destroy);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002250
2251 /* A single suspended or otherwise blocking device may stall
2252 * all others as well. Fortunately, this code path is to
2253 * recover from a situation that "should not happen":
2254 * concurrent writes in multi-primary setup.
2255 * In a "normal" lifecycle, this workqueue is supposed to be
2256 * destroyed without ever doing anything.
2257	 * If it turns out to be an issue anyway, we can do per
2258 * resource (replication group) or per device (minor) retry
2259 * workqueues instead.
2260 */
2261
2262 /* We are not just doing generic_make_request(),
2263 * as we want to keep the start_time information. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002264 inc_ap_bio(device);
2265 __drbd_make_request(device, bio, start_time);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002266 }
2267}
2268
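/* Sketch of the retry path described above: a write that was postponed
 * (e.g. after a concurrent-write conflict in a multi-primary setup) is
 * handed to drbd_restart_request() below, which moves it onto retry.writes
 * and kicks retry.wq; do_retry() then puts the kref left over from
 * completion_ref reaching zero and resubmits the master bio via
 * __drbd_make_request(), keeping the original start_time. */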
Lars Ellenberg9d05e7c2012-07-17 10:05:04 +02002269void drbd_restart_request(struct drbd_request *req)
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002270{
2271 unsigned long flags;
2272 spin_lock_irqsave(&retry.lock, flags);
2273 list_move_tail(&req->tl_requests, &retry.writes);
2274 spin_unlock_irqrestore(&retry.lock, flags);
2275
2276 /* Drop the extra reference that would otherwise
2277 * have been dropped by complete_master_bio.
2278 * do_retry() needs to grab a new one. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002279 dec_ap_bio(req->w.device);
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002280
2281 queue_work(retry.wq, &retry.worker);
2282}
2283
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002284void drbd_destroy_resource(struct kref *kref)
2285{
2286 struct drbd_resource *resource =
2287 container_of(kref, struct drbd_resource, kref);
2288
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002289 idr_destroy(&resource->devices);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002290 kfree(resource->name);
2291 kfree(resource);
2292}
2293
2294void drbd_free_resource(struct drbd_resource *resource)
2295{
2296 struct drbd_connection *connection, *tmp;
2297
2298 for_each_connection_safe(connection, tmp, resource) {
2299 list_del(&connection->connections);
2300 kref_put(&connection->kref, drbd_destroy_connection);
2301 }
2302 kref_put(&resource->kref, drbd_destroy_resource);
2303}
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002304
Philipp Reisnerb411b362009-09-25 16:07:19 -07002305static void drbd_cleanup(void)
2306{
2307 unsigned int i;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002308 struct drbd_device *device;
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002309 struct drbd_resource *resource, *tmp;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002310
2311 unregister_reboot_notifier(&drbd_notifier);
2312
Lars Ellenberg17a93f32010-11-24 10:37:35 +01002313 /* first remove proc,
2314	 * drbdsetup uses its presence to detect
2315 * whether DRBD is loaded.
2316	 * If we get stuck in proc removal,
2317 * but have netlink already deregistered,
2318 * some drbdsetup commands may wait forever
2319 * for an answer.
2320 */
2321 if (drbd_proc)
2322 remove_proc_entry("drbd", NULL);
2323
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002324 if (retry.wq)
2325 destroy_workqueue(retry.wq);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002326
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002327 drbd_genl_unregister();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002328
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002329 idr_for_each_entry(&drbd_devices, device, i)
Andreas Gruenbacherf82795d2011-07-03 23:32:26 +02002330 drbd_delete_device(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002331
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002332	/* not _rcu since there is no other updater anymore; genl is already unregistered */
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002333 for_each_resource_safe(resource, tmp, &drbd_resources) {
2334 list_del(&resource->resources);
2335 drbd_free_resource(resource);
Philipp Reisner81fa2e62011-05-04 15:10:30 +02002336 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002337
Philipp Reisner81a5d602011-02-22 19:53:16 -05002338 drbd_destroy_mempools();
Philipp Reisnerb411b362009-09-25 16:07:19 -07002339 unregister_blkdev(DRBD_MAJOR, "drbd");
2340
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002341 idr_destroy(&drbd_devices);
Philipp Reisner81a5d602011-02-22 19:53:16 -05002342
Philipp Reisnerb411b362009-09-25 16:07:19 -07002343 printk(KERN_INFO "drbd: module cleanup done.\n");
2344}
2345
2346/**
Artem Bityutskiyd97482e2012-07-25 18:12:12 +03002347 * drbd_congested() - Callback for the flusher thread
Philipp Reisnerb411b362009-09-25 16:07:19 -07002348 * @congested_data: User data
Artem Bityutskiyd97482e2012-07-25 18:12:12 +03002349 * @bdi_bits: Bits the BDI flusher thread is currently interested in
Philipp Reisnerb411b362009-09-25 16:07:19 -07002350 *
2351 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2352 */
2353static int drbd_congested(void *congested_data, int bdi_bits)
2354{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002355 struct drbd_device *device = congested_data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002356 struct request_queue *q;
2357 char reason = '-';
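	/* single-character tag recorded in device->congestion_reason below:
	 * 'd' = IO frozen by DRBD, 'c' = blocked on a pending usermode helper
	 * callback, 'b' = backing device congested, 'a'/'n' = network send
	 * path congested, '-' = not congested. */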
2358 int r = 0;
2359
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002360 if (!may_inc_ap_bio(device)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002361 /* DRBD has frozen IO */
2362 r = bdi_bits;
2363 reason = 'd';
2364 goto out;
2365 }
2366
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002367 if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
Lars Ellenbergc2ba6862012-06-14 15:14:06 +02002368 r |= (1 << BDI_async_congested);
2369 /* Without good local data, we would need to read from remote,
2370 * and that would need the worker thread as well, which is
2371 * currently blocked waiting for that usermode helper to
2372 * finish.
2373 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002374 if (!get_ldev_if_state(device, D_UP_TO_DATE))
Lars Ellenbergc2ba6862012-06-14 15:14:06 +02002375 r |= (1 << BDI_sync_congested);
2376 else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002377 put_ldev(device);
Lars Ellenbergc2ba6862012-06-14 15:14:06 +02002378 r &= bdi_bits;
2379 reason = 'c';
2380 goto out;
2381 }
2382
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002383 if (get_ldev(device)) {
2384 q = bdev_get_queue(device->ldev->backing_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002385 r = bdi_congested(&q->backing_dev_info, bdi_bits);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002386 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002387 if (r)
2388 reason = 'b';
2389 }
2390
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002391 if (bdi_bits & (1 << BDI_async_congested) &&
2392 test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002393 r |= (1 << BDI_async_congested);
2394 reason = reason == 'b' ? 'a' : 'n';
2395 }
2396
2397out:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002398 device->congestion_reason = reason;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002399 return r;
2400}
2401
Philipp Reisner6699b652011-02-09 11:10:24 +01002402static void drbd_init_workqueue(struct drbd_work_queue* wq)
2403{
Philipp Reisner6699b652011-02-09 11:10:24 +01002404 spin_lock_init(&wq->q_lock);
2405 INIT_LIST_HEAD(&wq->q);
Lars Ellenberg8c0785a2011-10-19 11:50:57 +02002406 init_waitqueue_head(&wq->q_wait);
Philipp Reisner6699b652011-02-09 11:10:24 +01002407}
2408
Andreas Gruenbacher4bc76042011-06-13 14:27:45 +02002409struct drbd_resource *drbd_find_resource(const char *name)
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002410{
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002411 struct drbd_resource *resource;
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002412
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002413 if (!name || !name[0])
2414 return NULL;
2415
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002416 rcu_read_lock();
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002417 for_each_resource_rcu(resource, &drbd_resources) {
2418 if (!strcmp(resource->name, name)) {
Andreas Gruenbacher4bc76042011-06-13 14:27:45 +02002419 kref_get(&resource->kref);
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002420 goto found;
Philipp Reisner0ace9df2011-04-24 10:53:19 +02002421 }
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002422 }
Andreas Gruenbacher4bc76042011-06-13 14:27:45 +02002423 resource = NULL;
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002424found:
Philipp Reisnerc141ebd2011-05-05 16:13:10 +02002425 rcu_read_unlock();
Andreas Gruenbacher4bc76042011-06-13 14:27:45 +02002426 return resource;
Philipp Reisner1aba4d72011-02-21 15:38:08 +01002427}
2428
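/* Illustrative (hypothetical) caller, not part of this file: the reference
 * taken by drbd_find_resource() must be dropped again by the caller. */
#if 0
	struct drbd_resource *resource = drbd_find_resource("r0");

	if (resource) {
		/* ... use the resource ... */
		kref_put(&resource->kref, drbd_destroy_resource);
	}
#endif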
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002429struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002430 void *peer_addr, int peer_addr_len)
2431{
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002432 struct drbd_resource *resource;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002433 struct drbd_connection *connection;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002434
2435 rcu_read_lock();
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002436 for_each_resource_rcu(resource, &drbd_resources) {
2437 for_each_connection_rcu(connection, resource) {
2438 if (connection->my_addr_len == my_addr_len &&
2439 connection->peer_addr_len == peer_addr_len &&
2440 !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2441 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2442 kref_get(&connection->kref);
2443 goto found;
2444 }
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002445 }
2446 }
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002447 connection = NULL;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002448found:
2449 rcu_read_unlock();
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002450 return connection;
Andreas Gruenbacher089c0752011-06-14 18:28:09 +02002451}
2452
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002453static int drbd_alloc_socket(struct drbd_socket *socket)
2454{
2455 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2456 if (!socket->rbuf)
2457 return -ENOMEM;
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002458 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2459 if (!socket->sbuf)
2460 return -ENOMEM;
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002461 return 0;
2462}
2463
2464static void drbd_free_socket(struct drbd_socket *socket)
2465{
Andreas Gruenbacher5a87d922011-03-24 21:17:52 +01002466 free_page((unsigned long) socket->sbuf);
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002467 free_page((unsigned long) socket->rbuf);
2468}
2469
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002470void conn_free_crypto(struct drbd_connection *connection)
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002471{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002472 drbd_free_sock(connection);
Philipp Reisner1d041222011-04-22 15:20:23 +02002473
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002474 crypto_free_hash(connection->csums_tfm);
2475 crypto_free_hash(connection->verify_tfm);
2476 crypto_free_hash(connection->cram_hmac_tfm);
2477 crypto_free_hash(connection->integrity_tfm);
2478 crypto_free_hash(connection->peer_integrity_tfm);
2479 kfree(connection->int_dig_in);
2480 kfree(connection->int_dig_vv);
Philipp Reisner1d041222011-04-22 15:20:23 +02002481
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002482 connection->csums_tfm = NULL;
2483 connection->verify_tfm = NULL;
2484 connection->cram_hmac_tfm = NULL;
2485 connection->integrity_tfm = NULL;
2486 connection->peer_integrity_tfm = NULL;
2487 connection->int_dig_in = NULL;
2488 connection->int_dig_vv = NULL;
Philipp Reisner91fd4da2011-04-20 17:47:29 +02002489}
2490
Andreas Gruenbachereb6bea62011-06-21 16:11:28 +02002491int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002492{
Andreas Gruenbachereb6bea62011-06-21 16:11:28 +02002493 struct drbd_connection *connection;
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002494 cpumask_var_t new_cpu_mask;
2495 int err;
2496
2497 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2498 return -ENOMEM;
2499 /*
2500 retcode = ERR_NOMEM;
2501 drbd_msg_put_info("unable to allocate cpumask");
2502 */
2503
2504 /* silently ignore cpu mask on UP kernel */
2505 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
Andreas Gruenbacherf44d0432011-07-22 13:53:19 +02002506 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
Philipp Reisnerc5b005a2012-04-30 12:53:52 +02002507 cpumask_bits(new_cpu_mask), nr_cpu_ids);
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002508 if (err) {
Andreas Gruenbacher1ec861e2011-07-06 11:01:44 +02002509 drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002510 /* retcode = ERR_CPU_MASK_PARSE; */
2511 goto fail;
2512 }
2513 }
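	/* bitmap_parse() above takes a hex string; e.g. a cpu_mask of "3"
	 * (an illustrative value) would select CPUs 0 and 1. */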
Andreas Gruenbachereb6bea62011-06-21 16:11:28 +02002514 resource->res_opts = *res_opts;
2515 for_each_connection_rcu(connection, resource) {
2516 if (!cpumask_equal(connection->cpu_mask, new_cpu_mask)) {
2517 cpumask_copy(connection->cpu_mask, new_cpu_mask);
2518 drbd_calc_cpu_mask(connection);
2519 connection->receiver.reset_cpu_mask = 1;
2520 connection->asender.reset_cpu_mask = 1;
2521 connection->worker.reset_cpu_mask = 1;
2522 }
Andreas Gruenbacherafbbfa82011-06-16 17:58:02 +02002523 }
2524 err = 0;
2525
2526fail:
2527 free_cpumask_var(new_cpu_mask);
2528 return err;
2529
2530}
2531
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002532struct drbd_resource *drbd_create_resource(const char *name)
2533{
2534 struct drbd_resource *resource;
2535
Andreas Gruenbacher6bbf53c2011-07-08 01:19:44 +02002536 resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002537 if (!resource)
2538 return NULL;
2539 resource->name = kstrdup(name, GFP_KERNEL);
2540 if (!resource->name) {
2541 kfree(resource);
2542 return NULL;
2543 }
2544 kref_init(&resource->kref);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002545 idr_init(&resource->devices);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002546 INIT_LIST_HEAD(&resource->connections);
2547 list_add_tail_rcu(&resource->resources, &drbd_resources);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02002548 mutex_init(&resource->conf_update);
2549 spin_lock_init(&resource->req_lock);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002550 return resource;
2551}
2552
Philipp Reisnerec0bddb2011-05-04 15:47:01 +02002553/* caller must be under genl_lock() */
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002554struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
Philipp Reisner21114382011-01-19 12:26:59 +01002555{
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002556 struct drbd_resource *resource;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002557 struct drbd_connection *connection;
Philipp Reisner21114382011-01-19 12:26:59 +01002558
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002559 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2560 if (!connection)
Philipp Reisner21114382011-01-19 12:26:59 +01002561 return NULL;
2562
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002563 if (drbd_alloc_socket(&connection->data))
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002564 goto fail;
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002565 if (drbd_alloc_socket(&connection->meta))
Andreas Gruenbachere6ef8a52011-03-24 18:07:54 +01002566 goto fail;
2567
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002568 if (!zalloc_cpumask_var(&connection->cpu_mask, GFP_KERNEL))
Philipp Reisner774b3052011-02-22 02:07:03 -05002569 goto fail;
2570
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002571 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2572 if (!connection->current_epoch)
Philipp Reisner12038a32011-11-09 19:18:00 +01002573 goto fail;
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01002574
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002575 INIT_LIST_HEAD(&connection->transfer_log);
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01002576
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002577 INIT_LIST_HEAD(&connection->current_epoch->list);
2578 connection->epochs = 1;
2579 spin_lock_init(&connection->epoch_lock);
2580 connection->write_ordering = WO_bdev_flush;
Philipp Reisner4b0007c2011-11-09 20:12:34 +01002581
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002582 connection->send.seen_any_write_yet = false;
2583 connection->send.current_epoch_nr = 0;
2584 connection->send.current_epoch_writes = 0;
Lars Ellenbergb6dd1a82011-11-28 15:04:49 +01002585
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002586 resource = drbd_create_resource(name);
2587 if (!resource)
2588 goto fail;
2589
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002590 connection->cstate = C_STANDALONE;
2591 mutex_init(&connection->cstate_mutex);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002592 init_waitqueue_head(&connection->ping_wait);
Andreas Gruenbacherc06ece62011-06-21 17:23:59 +02002593 idr_init(&connection->peer_devices);
Philipp Reisnerb2fb6dbe2011-01-19 13:48:44 +01002594
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002595 drbd_init_workqueue(&connection->sender_work);
2596 mutex_init(&connection->data.mutex);
2597 mutex_init(&connection->meta.mutex);
Philipp Reisner6699b652011-02-09 11:10:24 +01002598
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002599 drbd_thread_init(connection, &connection->receiver, drbdd_init, "receiver");
2600 drbd_thread_init(connection, &connection->worker, drbd_worker, "worker");
2601 drbd_thread_init(connection, &connection->asender, drbd_asender, "asender");
Philipp Reisner392c8802011-02-09 10:33:31 +01002602
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002603 kref_init(&connection->kref);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002604
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002605 connection->resource = resource;
Philipp Reisner21114382011-01-19 12:26:59 +01002606
Andreas Gruenbachereb6bea62011-06-21 16:11:28 +02002607 if (set_resource_options(resource, res_opts))
2608 goto fail_resource;
2609
2610 kref_get(&resource->kref);
2611 list_add_tail_rcu(&connection->connections, &resource->connections);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002612 return connection;
Philipp Reisner21114382011-01-19 12:26:59 +01002613
Andreas Gruenbachereb6bea62011-06-21 16:11:28 +02002614fail_resource:
2615 list_del(&resource->resources);
2616 drbd_free_resource(resource);
Philipp Reisner21114382011-01-19 12:26:59 +01002617fail:
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002618 kfree(connection->current_epoch);
2619 free_cpumask_var(connection->cpu_mask);
2620 drbd_free_socket(&connection->meta);
2621 drbd_free_socket(&connection->data);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002622 kfree(connection);
Philipp Reisner21114382011-01-19 12:26:59 +01002623 return NULL;
2624}
2625
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002626void drbd_destroy_connection(struct kref *kref)
Philipp Reisner21114382011-01-19 12:26:59 +01002627{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002628 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002629 struct drbd_resource *resource = connection->resource;
Philipp Reisner9dc9fbb2011-04-22 15:23:32 +02002630
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002631 if (atomic_read(&connection->current_epoch->epoch_size) != 0)
Andreas Gruenbacher1ec861e2011-07-06 11:01:44 +02002632 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002633 kfree(connection->current_epoch);
Philipp Reisner12038a32011-11-09 19:18:00 +01002634
Andreas Gruenbacherc06ece62011-06-21 17:23:59 +02002635 idr_destroy(&connection->peer_devices);
Philipp Reisner21114382011-01-19 12:26:59 +01002636
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002637 free_cpumask_var(connection->cpu_mask);
2638 drbd_free_socket(&connection->meta);
2639 drbd_free_socket(&connection->data);
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002640 kfree(connection->int_dig_in);
2641 kfree(connection->int_dig_vv);
2642 kfree(connection);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002643 kref_put(&resource->kref, drbd_destroy_resource);
Philipp Reisner21114382011-01-19 12:26:59 +01002644}
2645
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002646static int init_submitter(struct drbd_device *device)
Lars Ellenberg113fef92013-03-22 18:14:40 -06002647{
2648 /* opencoded create_singlethread_workqueue(),
2649	 * to be able to say "drbd%u_submit", ..., device->minor;
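	 * e.g. the device with minor 5 (illustrative value) gets a
	 * workqueue named "drbd5_submit". */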
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002650 device->submit.wq = alloc_workqueue("drbd%u_submit",
2651 WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
2652 if (!device->submit.wq)
Lars Ellenberg113fef92013-03-22 18:14:40 -06002653 return -ENOMEM;
2654
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002655 INIT_WORK(&device->submit.worker, do_submit);
2656 spin_lock_init(&device->submit.lock);
2657 INIT_LIST_HEAD(&device->submit.writes);
Lars Ellenberg113fef92013-03-22 18:14:40 -06002658 return 0;
2659}
2660
Andreas Gruenbacher59515a22011-07-06 14:20:49 +02002661enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002662{
Andreas Gruenbacherb6f85ef2011-07-06 15:03:31 +02002663 struct drbd_connection *connection;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002664 struct drbd_device *device;
Andreas Gruenbacherb6f85ef2011-07-06 15:03:31 +02002665 struct drbd_peer_device *peer_device, *tmp_peer_device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002666 struct gendisk *disk;
2667 struct request_queue *q;
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002668 int id;
Lars Ellenberg8432b312011-03-08 16:11:16 +01002669 enum drbd_ret_code err = ERR_NOMEM;
Philipp Reisner774b3052011-02-22 02:07:03 -05002670
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002671 device = minor_to_device(minor);
2672 if (device)
Philipp Reisner774b3052011-02-22 02:07:03 -05002673 return ERR_MINOR_EXISTS;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002674
2675 /* GFP_KERNEL, we are outside of all write-out paths */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002676 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2677 if (!device)
Philipp Reisner774b3052011-02-22 02:07:03 -05002678 return ERR_NOMEM;
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002679 kref_init(&device->kref);
2680
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002681 kref_get(&resource->kref);
2682 device->resource = resource;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002683 device->minor = minor;
2684 device->vnr = vnr;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002685
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002686 drbd_init_set_defaults(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002687
2688 q = blk_alloc_queue(GFP_KERNEL);
2689 if (!q)
2690 goto out_no_q;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002691 device->rq_queue = q;
2692 q->queuedata = device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002693
2694 disk = alloc_disk(1);
2695 if (!disk)
2696 goto out_no_disk;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002697 device->vdisk = disk;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002698
Andreas Gruenbacher81e84652010-12-09 15:03:57 +01002699 set_disk_ro(disk, true);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002700
2701 disk->queue = q;
2702 disk->major = DRBD_MAJOR;
2703 disk->first_minor = minor;
2704 disk->fops = &drbd_ops;
2705 sprintf(disk->disk_name, "drbd%d", minor);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002706 disk->private_data = device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002707
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002708 device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
Philipp Reisnerb411b362009-09-25 16:07:19 -07002709 /* we have no partitions. we contain only ourselves. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002710 device->this_bdev->bd_contains = device->this_bdev;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002711
2712 q->backing_dev_info.congested_fn = drbd_congested;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002713 q->backing_dev_info.congested_data = device;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002714
Andreas Gruenbacher2f58dcf2010-12-13 17:48:19 +01002715 blk_queue_make_request(q, drbd_make_request);
Lars Ellenberga73ff322012-06-25 19:15:38 +02002716 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
Philipp Reisner99432fc2011-05-20 16:39:13 +02002717	/* Setting max_hw_sectors to an odd value of 8 KiB here;
2718	   this triggers a max_bio_size message upon first attach or connect */
2719 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
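	/* assuming DRBD_MAX_BIO_SIZE_SAFE is 4 KiB: 4096 >> 8 == 16 sectors
	 * of 512 bytes == 8 KiB, i.e. deliberately not the "natural" 4 KiB. */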
Philipp Reisnerb411b362009-09-25 16:07:19 -07002720 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2721 blk_queue_merge_bvec(q, drbd_merge_bvec);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02002722 q->queue_lock = &resource->req_lock;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002723
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002724 device->md_io_page = alloc_page(GFP_KERNEL);
2725 if (!device->md_io_page)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002726 goto out_no_io_page;
2727
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002728 if (drbd_bm_init(device))
Philipp Reisnerb411b362009-09-25 16:07:19 -07002729 goto out_no_bitmap;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002730 device->read_requests = RB_ROOT;
2731 device->write_requests = RB_ROOT;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002732
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002733 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
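	/* idr_alloc() hands out an id from the half-open range [minor, minor + 1),
	 * i.e. exactly 'minor'; -ENOSPC therefore means this minor is already
	 * in use. */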
2734 if (id < 0) {
2735 if (id == -ENOSPC) {
Tejun Heo56de2102013-02-27 17:04:01 -08002736 err = ERR_MINOR_EXISTS;
2737 drbd_msg_put_info("requested minor exists already");
2738 }
Lars Ellenberg8432b312011-03-08 16:11:16 +01002739 goto out_no_minor_idr;
Tejun Heo56de2102013-02-27 17:04:01 -08002740 }
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002741 kref_get(&device->kref);
2742
2743 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2744 if (id < 0) {
2745 if (id == -ENOSPC) {
2746 err = ERR_MINOR_EXISTS;
2747 drbd_msg_put_info("requested minor exists already");
2748 }
2749 goto out_idr_remove_minor;
2750 }
2751 kref_get(&device->kref);
Tejun Heo56de2102013-02-27 17:04:01 -08002752
Andreas Gruenbacherb6f85ef2011-07-06 15:03:31 +02002753 INIT_LIST_HEAD(&device->peer_devices);
2754 for_each_connection(connection, resource) {
2755 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2756 if (!peer_device)
2757 goto out_idr_remove_from_resource;
2758 peer_device->connection = connection;
2759 peer_device->device = device;
2760
2761 list_add(&peer_device->peer_devices, &device->peer_devices);
2762 kref_get(&device->kref);
2763
2764 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2765 if (id < 0) {
2766 if (id == -ENOSPC) {
2767 err = ERR_INVALID_REQUEST;
2768 drbd_msg_put_info("requested volume exists already");
2769 }
2770 goto out_idr_remove_from_resource;
Tejun Heo56de2102013-02-27 17:04:01 -08002771 }
Andreas Gruenbacherb6f85ef2011-07-06 15:03:31 +02002772 kref_get(&connection->kref);
Lars Ellenberg569083c2011-03-07 09:49:02 +01002773 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07002774
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002775 if (init_submitter(device)) {
Lars Ellenberg113fef92013-03-22 18:14:40 -06002776 err = ERR_NOMEM;
2777 drbd_msg_put_info("unable to create submit workqueue");
2778 goto out_idr_remove_vol;
2779 }
2780
Philipp Reisner774b3052011-02-22 02:07:03 -05002781 add_disk(disk);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002782
Philipp Reisner2325eb62011-03-15 16:56:18 +01002783 /* inherit the connection state */
Andreas Gruenbacherb6f85ef2011-07-06 15:03:31 +02002784 device->state.conn = first_connection(resource)->cstate;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002785 if (device->state.conn == C_WF_REPORT_PARAMS)
2786 drbd_connected(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002787
Philipp Reisner774b3052011-02-22 02:07:03 -05002788 return NO_ERROR;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002789
Lars Ellenberg113fef92013-03-22 18:14:40 -06002790out_idr_remove_vol:
Andreas Gruenbacherc06ece62011-06-21 17:23:59 +02002791 idr_remove(&connection->peer_devices, vnr);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002792out_idr_remove_from_resource:
Andreas Gruenbacherb6f85ef2011-07-06 15:03:31 +02002793 for_each_connection(connection, resource) {
2794 peer_device = idr_find(&connection->peer_devices, vnr);
2795 if (peer_device) {
2796 idr_remove(&connection->peer_devices, vnr);
2797 kref_put(&connection->kref, drbd_destroy_connection);
2798 }
2799 }
2800 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2801 list_del(&peer_device->peer_devices);
2802 kfree(peer_device);
2803 }
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002804 idr_remove(&resource->devices, vnr);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002805out_idr_remove_minor:
Andreas Gruenbacher93e4bf72013-05-23 14:57:17 +02002806 idr_remove(&drbd_devices, minor);
Lars Ellenberg569083c2011-03-07 09:49:02 +01002807 synchronize_rcu();
Lars Ellenberg8432b312011-03-08 16:11:16 +01002808out_no_minor_idr:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002809 drbd_bm_cleanup(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002810out_no_bitmap:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002811 __free_page(device->md_io_page);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002812out_no_io_page:
2813 put_disk(disk);
2814out_no_disk:
2815 blk_cleanup_queue(q);
2816out_no_q:
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002817 kref_put(&resource->kref, drbd_destroy_resource);
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02002818 kfree(device);
Lars Ellenberg8432b312011-03-08 16:11:16 +01002819 return err;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002820}
2821
Andreas Gruenbacherf82795d2011-07-03 23:32:26 +02002822void drbd_delete_device(struct drbd_device *device)
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002823{
2824 struct drbd_resource *resource = device->resource;
2825 struct drbd_connection *connection;
2826 int refs = 3;
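	/* Base of 3: the kref_init() reference from drbd_create_device() plus
	 * the two references taken for the drbd_devices and resource->devices
	 * idrs; each connection's peer_devices entry adds one more below. */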
2827
2828 for_each_connection(connection, resource) {
Andreas Gruenbacherc06ece62011-06-21 17:23:59 +02002829 idr_remove(&connection->peer_devices, device->vnr);
Andreas Gruenbacher803ea132011-06-09 01:40:48 +02002830 refs++;
2831 }
2832 idr_remove(&resource->devices, device->vnr);
2833 idr_remove(&drbd_devices, device_to_minor(device));
2834 del_gendisk(device->vdisk);
2835 synchronize_rcu();
2836 kref_sub(&device->kref, refs, drbd_destroy_device);
2837}
2838
Philipp Reisnerb411b362009-09-25 16:07:19 -07002839int __init drbd_init(void)
2840{
2841 int err;
2842
Philipp Reisner2b8a90b2011-01-10 11:15:17 +01002843 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07002844 printk(KERN_ERR
Philipp Reisner81a5d602011-02-22 19:53:16 -05002845 "drbd: invalid minor_count (%d)\n", minor_count);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002846#ifdef MODULE
2847 return -EINVAL;
2848#else
Andreas Gruenbacher46530e82011-05-31 13:08:53 +02002849 minor_count = DRBD_MINOR_COUNT_DEF;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002850#endif
2851 }
2852
Philipp Reisnerb411b362009-09-25 16:07:19 -07002853 err = register_blkdev(DRBD_MAJOR, "drbd");
2854 if (err) {
2855 printk(KERN_ERR
2856 "drbd: unable to register block device major %d\n",
2857 DRBD_MAJOR);
2858 return err;
2859 }
2860
2861 register_reboot_notifier(&drbd_notifier);
2862
2863 /*
2864 * allocate all necessary structs
2865 */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002866 init_waitqueue_head(&drbd_pp_wait);
2867
2868 drbd_proc = NULL; /* play safe for drbd_cleanup */
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002869 idr_init(&drbd_devices);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002870
Lars Ellenberg69babf02013-10-23 10:59:15 +02002871 rwlock_init(&global_state_lock);
Andreas Gruenbacher77c556f2011-06-08 22:17:38 +02002872 INIT_LIST_HEAD(&drbd_resources);
Lars Ellenberg69babf02013-10-23 10:59:15 +02002873
2874 err = drbd_genl_register();
2875 if (err) {
2876 printk(KERN_ERR "drbd: unable to register generic netlink family\n");
2877 goto fail;
2878 }
2879
Philipp Reisnerb411b362009-09-25 16:07:19 -07002880 err = drbd_create_mempools();
2881 if (err)
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002882 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002883
Wei Yongjun6110d702013-06-25 16:50:04 +02002884 err = -ENOMEM;
Lars Ellenberg8c484ee2010-03-11 16:47:58 +01002885 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002886 if (!drbd_proc) {
2887 printk(KERN_ERR "drbd: unable to register proc file\n");
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002888 goto fail;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002889 }
2890
Lars Ellenberg2312f0b32011-11-24 10:36:25 +01002891 retry.wq = create_singlethread_workqueue("drbd-reissue");
2892 if (!retry.wq) {
2893 printk(KERN_ERR "drbd: unable to create retry workqueue\n");
2894 goto fail;
2895 }
2896 INIT_WORK(&retry.worker, do_retry);
2897 spin_lock_init(&retry.lock);
2898 INIT_LIST_HEAD(&retry.writes);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002899
2900 printk(KERN_INFO "drbd: initialized. "
2901 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2902 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2903 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
2904 printk(KERN_INFO "drbd: registered as block device major %d\n",
2905 DRBD_MAJOR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002906
2907 return 0; /* Success! */
2908
Lars Ellenberg3b98c0c2011-03-07 12:49:34 +01002909fail:
Philipp Reisnerb411b362009-09-25 16:07:19 -07002910 drbd_cleanup();
2911 if (err == -ENOMEM)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002912 printk(KERN_ERR "drbd: ran out of memory\n");
2913 else
2914 printk(KERN_ERR "drbd: initialization failure\n");
2915 return err;
2916}
2917
2918void drbd_free_bc(struct drbd_backing_dev *ldev)
2919{
2920 if (ldev == NULL)
2921 return;
2922
Tejun Heoe525fd82010-11-13 11:55:17 +01002923 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2924 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002925
Lars Ellenberg94ad0a12013-03-27 14:08:42 +01002926 kfree(ldev->disk_conf);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002927 kfree(ldev);
2928}
2929
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002930void drbd_free_sock(struct drbd_connection *connection)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002931{
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002932 if (connection->data.socket) {
2933 mutex_lock(&connection->data.mutex);
2934 kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
2935 sock_release(connection->data.socket);
2936 connection->data.socket = NULL;
2937 mutex_unlock(&connection->data.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002938 }
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002939 if (connection->meta.socket) {
2940 mutex_lock(&connection->meta.mutex);
2941 kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
2942 sock_release(connection->meta.socket);
2943 connection->meta.socket = NULL;
2944 mutex_unlock(&connection->meta.mutex);
Philipp Reisnerb411b362009-09-25 16:07:19 -07002945 }
2946}
2947
Philipp Reisnerb411b362009-09-25 16:07:19 -07002948/* meta data management */
2949
Andreas Gruenbacherbde89a92011-05-30 16:32:41 +02002950void conn_md_sync(struct drbd_connection *connection)
Philipp Reisner19fffd72012-08-28 16:48:03 +02002951{
Andreas Gruenbacherc06ece62011-06-21 17:23:59 +02002952 struct drbd_peer_device *peer_device;
Philipp Reisner19fffd72012-08-28 16:48:03 +02002953 int vnr;
2954
2955 rcu_read_lock();
Andreas Gruenbacherc06ece62011-06-21 17:23:59 +02002956 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2957 struct drbd_device *device = peer_device->device;
2958
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002959 kref_get(&device->kref);
Philipp Reisner19fffd72012-08-28 16:48:03 +02002960 rcu_read_unlock();
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002961 drbd_md_sync(device);
Andreas Gruenbacher05a10ec2011-06-07 22:54:17 +02002962 kref_put(&device->kref, drbd_destroy_device);
Philipp Reisner19fffd72012-08-28 16:48:03 +02002963 rcu_read_lock();
2964 }
2965 rcu_read_unlock();
2966}
2967
Lars Ellenbergae8bf312013-03-19 18:16:43 +01002968/* one aligned 4 kByte block */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002969struct meta_data_on_disk {
Lars Ellenbergcccac982013-03-19 18:16:46 +01002970 u64 la_size_sect; /* last agreed size. */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002971 u64 uuid[UI_SIZE]; /* UUIDs. */
2972 u64 device_uuid;
2973 u64 reserved_u64_1;
2974 u32 flags; /* MDF */
2975 u32 magic;
2976 u32 md_size_sect;
2977 u32 al_offset; /* offset to this block */
Lars Ellenbergae8bf312013-03-19 18:16:43 +01002978 u32 al_nr_extents; /* important for restoring the AL (userspace) */
Lars Ellenbergf3990022011-03-23 14:31:09 +01002979 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002980 u32 bm_offset; /* offset to the bitmap, from here */
2981 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
Philipp Reisner99432fc2011-05-20 16:39:13 +02002982 u32 la_peer_max_bio_size; /* last peer max_bio_size */
Philipp Reisnerb411b362009-09-25 16:07:19 -07002983
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01002984 /* see al_tr_number_to_on_disk_sector() */
2985 u32 al_stripes;
2986 u32 al_stripe_size_4k;
2987
2988 u8 reserved_u8[4096 - (7*8 + 10*4)];
Philipp Reisnerb411b362009-09-25 16:07:19 -07002989} __packed;
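/* Size check (see the BUILD_BUG_ON in drbd_md_sync()): 7 u64 members
 * (la_size_sect, uuid[4], device_uuid, reserved_u64_1) and 10 u32 members
 * make 7*8 + 10*4 = 96 bytes, so reserved_u8[] pads the structure to
 * exactly 4096 bytes. */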
2990
Philipp Reisnerd752b262013-06-25 16:50:08 +02002991
2992
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02002993void drbd_md_write(struct drbd_device *device, void *b)
Philipp Reisnerb411b362009-09-25 16:07:19 -07002994{
Philipp Reisnerd752b262013-06-25 16:50:08 +02002995 struct meta_data_on_disk *buffer = b;
Philipp Reisnerb411b362009-09-25 16:07:19 -07002996 sector_t sector;
2997 int i;
2998
Lars Ellenbergae8bf312013-03-19 18:16:43 +01002999 memset(buffer, 0, sizeof(*buffer));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003000
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003001 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
Philipp Reisnerb411b362009-09-25 16:07:19 -07003002 for (i = UI_CURRENT; i < UI_SIZE; i++)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003003 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3004 buffer->flags = cpu_to_be32(device->ldev->md.flags);
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003005 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003006
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003007 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
3008 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
3009 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003010 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003011 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003012
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003013 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3014 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003015
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003016 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3017 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003018
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02003019 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003020 sector = device->ldev->md.md_offset;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003021
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003022 if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003023 /* this was a try anyways ... */
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003024 drbd_err(device, "meta data update failed!\n");
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003025 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003026 }
Philipp Reisnerd752b262013-06-25 16:50:08 +02003027}
3028
3029/**
3030 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003031 * @device: DRBD device.
Philipp Reisnerd752b262013-06-25 16:50:08 +02003032 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003033void drbd_md_sync(struct drbd_device *device)
Philipp Reisnerd752b262013-06-25 16:50:08 +02003034{
3035 struct meta_data_on_disk *buffer;
3036
3037 /* Don't accidentally change the DRBD meta data layout. */
3038 BUILD_BUG_ON(UI_SIZE != 4);
3039 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3040
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003041 del_timer(&device->md_sync_timer);
Philipp Reisnerd752b262013-06-25 16:50:08 +02003042 /* timer may be rearmed by drbd_md_mark_dirty() now. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003043 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
Philipp Reisnerd752b262013-06-25 16:50:08 +02003044 return;
3045
3046	 /* We use D_FAILED here, and not D_ATTACHING, because we try to write
3047 * metadata even if we detach due to a disk failure! */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003048 if (!get_ldev_if_state(device, D_FAILED))
Philipp Reisnerd752b262013-06-25 16:50:08 +02003049 return;
3050
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003051 buffer = drbd_md_get_buffer(device);
Philipp Reisnerd752b262013-06-25 16:50:08 +02003052 if (!buffer)
3053 goto out;
3054
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003055 drbd_md_write(device, buffer);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003056
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003057 /* Update device->ldev->md.la_size_sect,
Philipp Reisnerb411b362009-09-25 16:07:19 -07003058	 * since we just updated it in the on-disk metadata. */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003059 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003060
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003061 drbd_md_put_buffer(device);
Philipp Reisnere1711732011-06-27 11:51:46 +02003062out:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003063 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003064}
3065
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003066static int check_activity_log_stripe_size(struct drbd_device *device,
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003067 struct meta_data_on_disk *on_disk,
3068 struct drbd_md *in_core)
3069{
3070 u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3071 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3072 u64 al_size_4k;
3073
3074 /* both not set: default to old fixed size activity log */
3075 if (al_stripes == 0 && al_stripe_size_4k == 0) {
3076 al_stripes = 1;
3077 al_stripe_size_4k = MD_32kB_SECT/8;
3078 }
3079
3080 /* some paranoia plausibility checks */
3081
3082 /* we need both values to be set */
3083 if (al_stripes == 0 || al_stripe_size_4k == 0)
3084 goto err;
3085
3086 al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3087
3088 /* Upper limit of activity log area, to avoid potential overflow
3089	 * problems in al_tr_number_to_on_disk_sector(). As of right now, more
3090	 * than 72 * 4k blocks total only increases the amount of history, so
3091	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
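	/* 16 GB in 4k blocks: 16 * 1024 * 1024 kB / 4 kB per block */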
3092 if (al_size_4k > (16 * 1024 * 1024/4))
3093 goto err;
3094
3095 /* Lower limit: we need at least 8 transaction slots (32kB)
3096 * to not break existing setups */
3097 if (al_size_4k < MD_32kB_SECT/8)
3098 goto err;
3099
3100 in_core->al_stripe_size_4k = al_stripe_size_4k;
3101 in_core->al_stripes = al_stripes;
3102 in_core->al_size_4k = al_size_4k;
3103
3104 return 0;
3105err:
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003106 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003107 al_stripes, al_stripe_size_4k);
3108 return -EINVAL;
3109}
3110
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003111static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003112{
3113 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3114 struct drbd_md *in_core = &bdev->md;
3115 s32 on_disk_al_sect;
3116 s32 on_disk_bm_sect;
3117
3118 /* The on-disk size of the activity log, calculated from offsets, and
3119 * the size of the activity log calculated from the stripe settings,
3120 * should match.
3121	 * Though we could relax this a bit: it is ok if the striped activity log
3122 * fits in the available on-disk activity log size.
3123 * Right now, that would break how resize is implemented.
3124 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3125 * of possible unused padding space in the on disk layout. */
3126 if (in_core->al_offset < 0) {
3127 if (in_core->bm_offset > in_core->al_offset)
3128 goto err;
3129 on_disk_al_sect = -in_core->al_offset;
3130 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3131 } else {
3132 if (in_core->al_offset != MD_4kB_SECT)
3133 goto err;
3134 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3135 goto err;
3136
3137 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3138 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3139 }
3140
3141 /* old fixed size meta data is exactly that: fixed. */
3142 if (in_core->meta_dev_idx >= 0) {
3143 if (in_core->md_size_sect != MD_128MB_SECT
3144 || in_core->al_offset != MD_4kB_SECT
3145 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3146 || in_core->al_stripes != 1
3147 || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3148 goto err;
3149 }
3150
3151 if (capacity < in_core->md_size_sect)
3152 goto err;
3153 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3154 goto err;
3155
3156 /* should be aligned, and at least 32k */
3157 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3158 goto err;
3159
3160 /* should fit (for now: exactly) into the available on-disk space;
3161 * overflow prevention is in check_activity_log_stripe_size() above. */
3162 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3163 goto err;
3164
3165 /* again, should be aligned */
3166 if (in_core->bm_offset & 7)
3167 goto err;
3168
3169 /* FIXME check for device grow with flex external meta data? */
3170
3171 /* can the available bitmap space cover the last agreed device size? */
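	/* One bitmap bit covers BM_BLOCK_SIZE (4kB, i.e. 8 sectors) of data:
	 * "+7" rounds la_size_sect up to a full 4k chunk, the first division
	 * turns data sectors into bits, "/8" bits into bytes, "/512" bytes
	 * into on-disk sectors. */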
3172 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3173 goto err;
3174
3175 return 0;
3176
3177err:
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003178 drbd_err(device, "meta data offsets don't make sense: idx=%d "
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003179 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3180 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3181 in_core->meta_dev_idx,
3182 in_core->al_stripes, in_core->al_stripe_size_4k,
3183 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3184 (unsigned long long)in_core->la_size_sect,
3185 (unsigned long long)capacity);
3186
3187 return -EINVAL;
3188}
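
/* For reference, two layouts enforced by the checks above:
 *
 * Indexed (fixed-size external) meta data, offsets relative to md_offset:
 *	superblock [0, 4kB), activity log [4kB, 36kB), bitmap [36kB, 128MB)
 *
 * Flexible internal meta data: al_offset and bm_offset are negative,
 * counted back from the superblock at the end of the device; the
 * activity log sits directly below the superblock, the bitmap below
 * the activity log. */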
3189
3190
Philipp Reisnerb411b362009-09-25 16:07:19 -07003191/**
3192 * drbd_md_read() - Reads in the meta data super block
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003193 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003194 * @bdev: Device from which the meta data should be read in.
3195 *
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003196 * Return NO_ERROR on success, or an enum drbd_ret_code in case
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003197 * something goes wrong.
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003198 *
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003199 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003200 * even before @bdev is assigned to @device->ldev.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003201 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003202int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003203{
3204 struct meta_data_on_disk *buffer;
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003205 u32 magic, flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003206 int i, rv = NO_ERROR;
3207
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003208 if (device->state.disk != D_DISKLESS)
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003209 return ERR_DISK_CONFIGURED;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003210
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003211 buffer = drbd_md_get_buffer(device);
Philipp Reisnere1711732011-06-27 11:51:46 +02003212 if (!buffer)
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003213 return ERR_NOMEM;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003214
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003215 /* First, figure out where our meta data superblock is located,
3216 * and read it. */
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003217 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3218 bdev->md.md_offset = drbd_md_ss(bdev);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003219
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003220 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003221 /* NOTE: can't do normal error processing here as this is
Philipp Reisnerb411b362009-09-25 16:07:19 -07003222 called BEFORE disk is attached */
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003223 drbd_err(device, "Error while reading metadata.\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003224 rv = ERR_IO_MD_DISK;
3225 goto err;
3226 }
3227
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003228 magic = be32_to_cpu(buffer->magic);
3229 flags = be32_to_cpu(buffer->flags);
3230 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3231 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3232 /* btw: that's Activity Log clean, not "all" clean. */
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003233 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003234 rv = ERR_MD_UNCLEAN;
3235 goto err;
3236 }
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003237
3238 rv = ERR_MD_INVALID;
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003239 if (magic != DRBD_MD_MAGIC_08) {
Philipp Reisner43de7c82011-11-10 13:16:13 +01003240 if (magic == DRBD_MD_MAGIC_07)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003241 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
Lars Ellenbergd5d7ebd2011-07-05 20:59:26 +02003242 else
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003243 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
Philipp Reisnerb411b362009-09-25 16:07:19 -07003244 goto err;
3245 }
3246
3247 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003248 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003249 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003250 goto err;
3251 }
3252
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003253
3254 /* convert to in_core endian */
3255 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003256 for (i = UI_CURRENT; i < UI_SIZE; i++)
3257 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3258 bdev->md.flags = be32_to_cpu(buffer->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003259 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3260
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003261 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3262 bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3263 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3264
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003265 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003266 goto err;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003267 if (check_offsets_and_sizes(device, bdev))
Lars Ellenbergc04ccaa2013-03-19 18:16:47 +01003268 goto err;
3269
Philipp Reisnerb411b362009-09-25 16:07:19 -07003270 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003271 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003272 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003273 goto err;
3274 }
3275 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003276 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003277 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003278 goto err;
3279 }
3280
Lars Ellenberg3a4d4eb2013-03-19 18:16:44 +01003281 rv = NO_ERROR;
3282
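	/* While not (yet) connected, fall back to the last agreed peer max
	 * bio size recorded in the meta data, but never below
	 * DRBD_MAX_BIO_SIZE_SAFE. */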
Andreas Gruenbacher05008132011-07-07 14:19:42 +02003283 spin_lock_irq(&device->resource->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003284 if (device->state.conn < C_CONNECTED) {
Lars Ellenbergdb141b22012-06-25 19:15:58 +02003285 unsigned int peer;
Philipp Reisner99432fc2011-05-20 16:39:13 +02003286 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
Lars Ellenbergdb141b22012-06-25 19:15:58 +02003287 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003288 device->peer_max_bio_size = peer;
Philipp Reisner99432fc2011-05-20 16:39:13 +02003289 }
Andreas Gruenbacher05008132011-07-07 14:19:42 +02003290 spin_unlock_irq(&device->resource->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003291
3292 err:
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003293 drbd_md_put_buffer(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003294
3295 return rv;
3296}
3297
3298/**
3299 * drbd_md_mark_dirty() - Mark meta data super block as dirty
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003300 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003301 *
3302 * Call this function if you change anything that should be written to
3303 * the meta-data super block. This function sets MD_DIRTY, and starts a
3304 * timer that ensures drbd_md_sync() gets called within five seconds.
3305 */
Lars Ellenbergca0e6092010-10-14 15:01:21 +02003306#ifdef DEBUG
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003307void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
Lars Ellenbergee15b032010-09-03 10:00:09 +02003308{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003309 if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3310 mod_timer(&device->md_sync_timer, jiffies + HZ);
3311 device->last_md_mark_dirty.line = line;
3312 device->last_md_mark_dirty.func = func;
Lars Ellenbergee15b032010-09-03 10:00:09 +02003313 }
3314}
3315#else
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003316void drbd_md_mark_dirty(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003317{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003318 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3319 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003320}
Lars Ellenbergee15b032010-09-03 10:00:09 +02003321#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07003322
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003323void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003324{
3325 int i;
3326
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003327 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003328 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003329}
3330
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003331void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003332{
3333 if (idx == UI_CURRENT) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003334 if (device->state.role == R_PRIMARY)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003335 val |= 1;
3336 else
3337 val &= ~((u64)1);
3338
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003339 drbd_set_ed_uuid(device, val);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003340 }
3341
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003342 device->ldev->md.uuid[idx] = val;
3343 drbd_md_mark_dirty(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003344}
3345
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003346void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003347{
3348 unsigned long flags;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003349 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3350 __drbd_uuid_set(device, idx, val);
3351 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003352}
Philipp Reisnerb411b362009-09-25 16:07:19 -07003353
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003354void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003355{
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003356 unsigned long flags;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003357 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3358 if (device->ldev->md.uuid[idx]) {
3359 drbd_uuid_move_history(device);
3360 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003361 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003362 __drbd_uuid_set(device, idx, val);
3363 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003364}
3365
3366/**
3367 * drbd_uuid_new_current() - Creates a new current UUID
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003368 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003369 *
3370 * Creates a new current UUID, and rotates the old current UUID into
3371 * the bitmap slot. Causes an incremental resync upon next connect.
3372 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003373void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003374{
3375 u64 val;
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003376 unsigned long long bm_uuid;
3377
3378 get_random_bytes(&val, sizeof(u64));
3379
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003380 spin_lock_irq(&device->ldev->md.uuid_lock);
3381 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
Philipp Reisnerb411b362009-09-25 16:07:19 -07003382
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003383 if (bm_uuid)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003384 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003385
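	/* Rotate: the previous current UUID becomes the bitmap UUID, the
	 * fresh random value becomes the new current UUID (its lowest bit
	 * encodes the primary/secondary role, see __drbd_uuid_set()). */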
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003386 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3387 __drbd_uuid_set(device, UI_CURRENT, val);
3388 spin_unlock_irq(&device->ldev->md.uuid_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003389
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003390 drbd_print_uuids(device, "new current UUID");
Lars Ellenbergaaa8e2b2010-10-15 13:16:53 +02003391 /* get it to stable storage _now_ */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003392 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003393}
3394
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003395void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003396{
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003397 unsigned long flags;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003398 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003399 return;
3400
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003401 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003402 if (val == 0) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003403 drbd_uuid_move_history(device);
3404 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3405 device->ldev->md.uuid[UI_BITMAP] = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003406 } else {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003407 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
Lars Ellenberg62b0da32011-01-20 13:25:21 +01003408 if (bm_uuid)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003409 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003410
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003411 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003412 }
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003413 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
Philipp Reisner9f2247b2012-08-16 14:25:58 +02003414
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003415 drbd_md_mark_dirty(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003416}
3417
3418/**
3419 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003420 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003421 *
3422 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3423 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003424int drbd_bmio_set_n_write(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003425{
3426 int rv = -EIO;
3427
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003428 if (get_ldev_if_state(device, D_ATTACHING)) {
3429 drbd_md_set_flag(device, MDF_FULL_SYNC);
3430 drbd_md_sync(device);
3431 drbd_bm_set_all(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003432
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003433 rv = drbd_bm_write(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003434
3435 if (!rv) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003436 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3437 drbd_md_sync(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003438 }
3439
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003440 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003441 }
3442
3443 return rv;
3444}
3445
3446/**
3447 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003448 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003449 *
3450 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3451 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003452int drbd_bmio_clear_n_write(struct drbd_device *device)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003453{
3454 int rv = -EIO;
3455
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003456 drbd_resume_al(device);
3457 if (get_ldev_if_state(device, D_ATTACHING)) {
3458 drbd_bm_clear_all(device);
3459 rv = drbd_bm_write(device);
3460 put_ldev(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003461 }
3462
3463 return rv;
3464}
3465
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003466static int w_bitmap_io(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003467{
3468 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003469 struct drbd_device *device = w->device;
Lars Ellenberg02851e92010-12-16 14:47:39 +01003470 int rv = -EIO;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003471
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02003472 D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003473
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003474 if (get_ldev(device)) {
3475 drbd_bm_lock(device, work->why, work->flags);
3476 rv = work->io_fn(device);
3477 drbd_bm_unlock(device);
3478 put_ldev(device);
Lars Ellenberg02851e92010-12-16 14:47:39 +01003479 }
Philipp Reisnerb411b362009-09-25 16:07:19 -07003480
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003481 clear_bit_unlock(BITMAP_IO, &device->flags);
3482 wake_up(&device->misc_wait);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003483
3484 if (work->done)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003485 work->done(device, rv);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003486
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003487 clear_bit(BITMAP_IO_QUEUED, &device->flags);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003488 work->why = NULL;
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003489 work->flags = 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003490
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003491 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003492}
3493
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003494void drbd_ldev_destroy(struct drbd_device *device)
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003495{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003496 lc_destroy(device->resync);
3497 device->resync = NULL;
3498 lc_destroy(device->act_log);
3499 device->act_log = NULL;
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003500 __no_warn(local,
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003501 drbd_free_bc(device->ldev);
3502 device->ldev = NULL;);
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003503
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003504 clear_bit(GO_DISKLESS, &device->flags);
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003505}
3506
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003507static int w_go_diskless(struct drbd_work *w, int unused)
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003508{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003509 struct drbd_device *device = w->device;
Philipp Reisner00d56942011-02-09 18:09:48 +01003510
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02003511 D_ASSERT(device, device->state.disk == D_FAILED);
Lars Ellenberg9d282872010-10-14 13:57:07 +02003512 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3513 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
Lars Ellenberg82f59cc2010-10-16 12:13:47 +02003514 * the protected members anymore, though, so once put_ldev reaches zero
3515 * again, it will be safe to free them. */
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02003516
3517	/* Try to write changed bitmap pages; read errors may have just
3518 * set some bits outside the area covered by the activity log.
3519 *
3520 * If we have an IO error during the bitmap writeout,
3521 * we will want a full sync next time, just in case.
3522 * (Do we want a specific meta data flag for this?)
3523 *
3524 * If that does not make it to stable storage either,
Philipp Reisnerfd0017c2012-10-19 14:19:23 +02003525 * we cannot do anything about that anymore.
3526 *
3527 * We still need to check if both bitmap and ldev are present, we may
3528 * end up here after a failed attach, before ldev was even assigned.
3529 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003530 if (device->bitmap && device->ldev) {
Philipp Reisnerbb451852013-03-27 14:08:39 +01003531		/* An interrupted resync or similar is allowed to recount bits
3532 * while we detach.
3533 * Any modifications would not be expected anymore, though.
3534 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003535 if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
Philipp Reisnerbb451852013-03-27 14:08:39 +01003536 "detach", BM_LOCKED_TEST_ALLOWED)) {
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003537 if (test_bit(WAS_READ_ERROR, &device->flags)) {
3538 drbd_md_set_flag(device, MDF_FULL_SYNC);
3539 drbd_md_sync(device);
Lars Ellenberga2a3c74f2012-09-22 12:26:57 +02003540 }
3541 }
3542 }
3543
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003544 drbd_force_state(device, NS(disk, D_DISKLESS));
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003545 return 0;
Lars Ellenberge9e6f3e2010-09-14 20:26:27 +02003546}
3547
Philipp Reisnerb411b362009-09-25 16:07:19 -07003548/**
3549 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003550 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003551 * @io_fn: IO callback to be called when bitmap IO is possible
3552 * @done: callback to be called after the bitmap IO was performed
3553 * @why: Descriptive text of the reason for doing the IO
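 * @flags: bitmap locking flags (enum bm_flag)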
3554 *
3555 * While IO on the bitmap happens we freeze application IO, thus ensuring
3556 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3557 * called from worker context. It MUST NOT be used while a previous such
3558 * work is still pending!
3559 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003560void drbd_queue_bitmap_io(struct drbd_device *device,
Andreas Gruenbacher54761692011-05-30 16:15:21 +02003561 int (*io_fn)(struct drbd_device *),
3562 void (*done)(struct drbd_device *, int),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003563 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003564{
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02003565 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003566
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02003567 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3568 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3569 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003570 if (device->bm_io_work.why)
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003571 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003572 why, device->bm_io_work.why);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003573
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003574 device->bm_io_work.io_fn = io_fn;
3575 device->bm_io_work.done = done;
3576 device->bm_io_work.why = why;
3577 device->bm_io_work.flags = flags;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003578
Andreas Gruenbacher05008132011-07-07 14:19:42 +02003579 spin_lock_irq(&device->resource->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003580 set_bit(BITMAP_IO, &device->flags);
3581 if (atomic_read(&device->ap_bio_cnt) == 0) {
3582 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003583 drbd_queue_work(&first_peer_device(device)->connection->sender_work, &device->bm_io_work.w);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003584 }
Andreas Gruenbacher05008132011-07-07 14:19:42 +02003585 spin_unlock_irq(&device->resource->req_lock);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003586}
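
/* Illustrative only -- callers of drbd_queue_bitmap_io() live elsewhere
 * (netlink and state handling code); the "why" string and flags below are
 * just an example of the calling convention, not taken from this file:
 *
 *	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, NULL,
 *			     "set_n_write from attaching", BM_LOCKED_SET_ALLOWED);
 *
 * Passing NULL for @done is fine; w_bitmap_io() only calls it if set. */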
3587
3588/**
3589 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003590 * @device: DRBD device.
Philipp Reisnerb411b362009-09-25 16:07:19 -07003591 * @io_fn: IO callback to be called when bitmap IO is possible
3592 * @why: Descriptive text of the reason for doing the IO
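 * @flags: bitmap locking flags (enum bm_flag)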
3593 *
3594 * Freezes application IO while the actual IO operation runs. This
3595 * function MAY NOT be called from worker context.
3596 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003597int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003598 char *why, enum bm_flag flags)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003599{
3600 int rv;
3601
Andreas Gruenbacher0b0ba1e2011-06-27 16:23:33 +02003602 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003603
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003604 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003605 drbd_suspend_io(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003606
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003607 drbd_bm_lock(device, why, flags);
3608 rv = io_fn(device);
3609 drbd_bm_unlock(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003610
Lars Ellenberg20ceb2b2011-01-21 10:56:44 +01003611 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003612 drbd_resume_io(device);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003613
3614 return rv;
3615}
3616
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003617void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003618{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003619 if ((device->ldev->md.flags & flag) != flag) {
3620 drbd_md_mark_dirty(device);
3621 device->ldev->md.flags |= flag;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003622 }
3623}
3624
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003625void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003626{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003627 if ((device->ldev->md.flags & flag) != 0) {
3628 drbd_md_mark_dirty(device);
3629 device->ldev->md.flags &= ~flag;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003630 }
3631}
3632int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3633{
3634 return (bdev->md.flags & flag) != 0;
3635}
3636
3637static void md_sync_timer_fn(unsigned long data)
3638{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003639 struct drbd_device *device = (struct drbd_device *) data;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003640
Lars Ellenbergb792b652012-08-22 14:59:06 +02003641 /* must not double-queue! */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003642 if (list_empty(&device->md_sync_work.list))
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003643 drbd_queue_work_front(&first_peer_device(device)->connection->sender_work, &device->md_sync_work);
Philipp Reisnerb411b362009-09-25 16:07:19 -07003644}
3645
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003646static int w_md_sync(struct drbd_work *w, int unused)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003647{
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003648 struct drbd_device *device = w->device;
Philipp Reisner00d56942011-02-09 18:09:48 +01003649
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003650 drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
Lars Ellenbergee15b032010-09-03 10:00:09 +02003651#ifdef DEBUG
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003652 drbd_warn(device, "last md_mark_dirty: %s:%u\n",
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003653 device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
Lars Ellenbergee15b032010-09-03 10:00:09 +02003654#endif
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003655 drbd_md_sync(device);
Andreas Gruenbacher99920dc2011-03-16 15:31:39 +01003656 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003657}
3658
Andreas Gruenbacherd8763022011-01-26 17:39:41 +01003659const char *cmdname(enum drbd_packet cmd)
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003660{
3661 /* THINK may need to become several global tables
3662 * when we want to support more than
3663 * one PRO_VERSION */
3664 static const char *cmdnames[] = {
3665 [P_DATA] = "Data",
3666 [P_DATA_REPLY] = "DataReply",
3667 [P_RS_DATA_REPLY] = "RSDataReply",
3668 [P_BARRIER] = "Barrier",
3669 [P_BITMAP] = "ReportBitMap",
3670 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3671 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3672 [P_UNPLUG_REMOTE] = "UnplugRemote",
3673 [P_DATA_REQUEST] = "DataRequest",
3674 [P_RS_DATA_REQUEST] = "RSDataRequest",
3675 [P_SYNC_PARAM] = "SyncParam",
3676 [P_SYNC_PARAM89] = "SyncParam89",
3677 [P_PROTOCOL] = "ReportProtocol",
3678 [P_UUIDS] = "ReportUUIDs",
3679 [P_SIZES] = "ReportSizes",
3680 [P_STATE] = "ReportState",
3681 [P_SYNC_UUID] = "ReportSyncUUID",
3682 [P_AUTH_CHALLENGE] = "AuthChallenge",
3683 [P_AUTH_RESPONSE] = "AuthResponse",
3684 [P_PING] = "Ping",
3685 [P_PING_ACK] = "PingAck",
3686 [P_RECV_ACK] = "RecvAck",
3687 [P_WRITE_ACK] = "WriteAck",
3688 [P_RS_WRITE_ACK] = "RSWriteAck",
Lars Ellenbergd4dabbe2012-08-01 12:33:51 +02003689 [P_SUPERSEDED] = "Superseded",
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003690 [P_NEG_ACK] = "NegAck",
3691 [P_NEG_DREPLY] = "NegDReply",
3692 [P_NEG_RS_DREPLY] = "NegRSDReply",
3693 [P_BARRIER_ACK] = "BarrierAck",
3694 [P_STATE_CHG_REQ] = "StateChgRequest",
3695 [P_STATE_CHG_REPLY] = "StateChgReply",
3696 [P_OV_REQUEST] = "OVRequest",
3697 [P_OV_REPLY] = "OVReply",
3698 [P_OV_RESULT] = "OVResult",
3699 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3700 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3701 [P_COMPRESSED_BITMAP] = "CBitmap",
3702 [P_DELAY_PROBE] = "DelayProbe",
3703 [P_OUT_OF_SYNC] = "OutOfSync",
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003704 [P_RETRY_WRITE] = "RetryWrite",
Lars Ellenbergae25b332011-04-24 00:01:16 +02003705 [P_RS_CANCEL] = "RSCancel",
3706 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3707 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
Philipp Reisner036b17e2011-05-16 17:38:11 +02003708 [P_RETRY_WRITE] = "retry_write",
3709 [P_PROTOCOL_UPDATE] = "protocol_update",
Lars Ellenbergae25b332011-04-24 00:01:16 +02003710
3711 /* enum drbd_packet, but not commands - obsoleted flags:
3712 * P_MAY_IGNORE
3713 * P_MAX_OPT_CMD
3714 */
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003715 };
3716
Lars Ellenbergae25b332011-04-24 00:01:16 +02003717 /* too big for the array: 0xfffX */
Andreas Gruenbachere5d6f332011-03-28 16:44:40 +02003718 if (cmd == P_INITIAL_META)
3719 return "InitialMeta";
3720 if (cmd == P_INITIAL_DATA)
3721 return "InitialData";
Andreas Gruenbacher60381782011-03-28 17:05:50 +02003722 if (cmd == P_CONNECTION_FEATURES)
3723 return "ConnectionFeatures";
Andreas Gruenbacher6e849ce2011-03-14 17:27:45 +01003724 if (cmd >= ARRAY_SIZE(cmdnames))
Andreas Gruenbacherf2ad9062011-01-26 17:13:25 +01003725 return "Unknown";
3726 return cmdnames[cmd];
3727}
3728
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003729/**
3730 * drbd_wait_misc - wait for a request to make progress
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003731 * @device: device associated with the request
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003732 * @i: the struct drbd_interval embedded in struct drbd_request or
3733 * struct drbd_peer_request
3734 */
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003735int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003736{
Philipp Reisner44ed1672011-04-19 17:10:19 +02003737 struct net_conf *nc;
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003738 DEFINE_WAIT(wait);
3739 long timeout;
3740
Philipp Reisner44ed1672011-04-19 17:10:19 +02003741 rcu_read_lock();
Andreas Gruenbachera6b32bc2011-05-31 14:33:49 +02003742 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
Philipp Reisner44ed1672011-04-19 17:10:19 +02003743 if (!nc) {
3744 rcu_read_unlock();
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003745 return -ETIMEDOUT;
Philipp Reisner44ed1672011-04-19 17:10:19 +02003746 }
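	/* nc->timeout is configured in tenths of a second; scale to jiffies
	 * and multiply by ko_count (ko_count == 0 means wait without
	 * timeout). */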
3747 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3748 rcu_read_unlock();
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003749
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003750 /* Indicate to wake up device->misc_wait on progress. */
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003751 i->waiting = true;
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003752 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02003753 spin_unlock_irq(&device->resource->req_lock);
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003754 timeout = schedule_timeout(timeout);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003755 finish_wait(&device->misc_wait, &wait);
Andreas Gruenbacher05008132011-07-07 14:19:42 +02003756 spin_lock_irq(&device->resource->req_lock);
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003757 if (!timeout || device->state.conn < C_CONNECTED)
Andreas Gruenbacher7be8da02011-02-22 02:15:32 +01003758 return -ETIMEDOUT;
3759 if (signal_pending(current))
3760 return -ERESTARTSYS;
3761 return 0;
Philipp Reisnerb411b362009-09-25 16:07:19 -07003762}
3763
3764#ifdef CONFIG_DRBD_FAULT_INJECTION
3765/* Fault insertion support including random number generator shamelessly
3766 * stolen from kernel/rcutorture.c */
3767struct fault_random_state {
3768 unsigned long state;
3769 unsigned long count;
3770};
3771
3772#define FAULT_RANDOM_MULT 39916801 /* prime */
3773#define FAULT_RANDOM_ADD 479001701 /* prime */
3774#define FAULT_RANDOM_REFRESH 10000
3775
3776/*
3777 * Crude but fast random-number generator. Uses a linear congruential
3778 * generator, with occasional help from get_random_bytes().
3779 */
3780static unsigned long
3781_drbd_fault_random(struct fault_random_state *rsp)
3782{
3783 long refresh;
3784
Roel Kluin49829ea2009-12-15 22:55:44 +01003785 if (!rsp->count--) {
Philipp Reisnerb411b362009-09-25 16:07:19 -07003786 get_random_bytes(&refresh, sizeof(refresh));
3787 rsp->state += refresh;
3788 rsp->count = FAULT_RANDOM_REFRESH;
3789 }
3790 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3791 return swahw32(rsp->state);
3792}
3793
3794static char *
3795_drbd_fault_str(unsigned int type) {
3796 static char *_faults[] = {
3797 [DRBD_FAULT_MD_WR] = "Meta-data write",
3798 [DRBD_FAULT_MD_RD] = "Meta-data read",
3799 [DRBD_FAULT_RS_WR] = "Resync write",
3800 [DRBD_FAULT_RS_RD] = "Resync read",
3801 [DRBD_FAULT_DT_WR] = "Data write",
3802 [DRBD_FAULT_DT_RD] = "Data read",
3803 [DRBD_FAULT_DT_RA] = "Data read ahead",
3804 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
Philipp Reisner6b4388a2010-04-26 14:11:45 +02003805 [DRBD_FAULT_AL_EE] = "EE allocation",
3806 [DRBD_FAULT_RECEIVE] = "receive data corruption",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003807 };
3808
3809 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3810}
3811
3812unsigned int
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003813_drbd_insert_fault(struct drbd_device *device, unsigned int type)
Philipp Reisnerb411b362009-09-25 16:07:19 -07003814{
3815 static struct fault_random_state rrs = {0, 0};
3816
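	/* fault_devs is a bitmask of minor numbers to inject faults into
	 * (0 means all devices); fault_rate is a percentage. */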
3817 unsigned int ret = (
3818 (fault_devs == 0 ||
Andreas Gruenbacherb30ab792011-07-03 13:26:43 +02003819 ((1 << device_to_minor(device)) & fault_devs) != 0) &&
Philipp Reisnerb411b362009-09-25 16:07:19 -07003820 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3821
3822 if (ret) {
3823 fault_count++;
3824
Lars Ellenberg73835062010-05-27 11:51:56 +02003825 if (__ratelimit(&drbd_ratelimit_state))
Andreas Gruenbacherd0180172011-07-03 17:53:52 +02003826 drbd_warn(device, "***Simulating %s failure\n",
Philipp Reisnerb411b362009-09-25 16:07:19 -07003827 _drbd_fault_str(type));
3828 }
3829
3830 return ret;
3831}
3832#endif
3833
3834const char *drbd_buildtag(void)
3835{
3836	/* DRBD built from external sources has a reference here to the
3837 git hash of the source code. */
3838
3839 static char buildtag[38] = "\0uilt-in";
3840
3841 if (buildtag[0] == 0) {
Cong Wangbc4854b2012-04-03 14:13:36 +08003842#ifdef MODULE
3843 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3844#else
3845 buildtag[0] = 'b';
Philipp Reisnerb411b362009-09-25 16:07:19 -07003846#endif
Philipp Reisnerb411b362009-09-25 16:07:19 -07003847 }
3848
3849 return buildtag;
3850}
3851
3852module_init(drbd_init)
3853module_exit(drbd_cleanup)
3854
Philipp Reisnerb411b362009-09-25 16:07:19 -07003855EXPORT_SYMBOL(drbd_conn_str);
3856EXPORT_SYMBOL(drbd_role_str);
3857EXPORT_SYMBOL(drbd_disk_str);
3858EXPORT_SYMBOL(drbd_set_st_err_str);