// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	struct request_queue *q = device->rq_queue;

	generic_start_io_acct(q, bio_op(req->master_bio),
			      req->i.size >> 9, &device->vdisk->part0);
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	struct request_queue *q = device->rq_queue;

	generic_end_io_acct(q, bio_op(req->master_bio),
			    &device->vdisk->part0, req->start_jif);
}
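
/*
 * Illustrative pairing of the accounting helpers above (a sketch, not a
 * verbatim call site in this file): _drbd_start_io_acct() is expected to
 * run when the master bio enters the driver (in __drbd_make_request),
 * and _drbd_end_io_acct() runs from drbd_req_complete() further below:
 *
 *	req->start_jif = jiffies;		// assumed set at submit time
 *	_drbd_start_io_acct(device, req);	// request enters DRBD
 *	...					// local and/or mirrored I/O
 *	_drbd_end_io_acct(device, req);		// completion towards upper layers
 *
 * generic_end_io_acct() uses req->start_jif to account the whole
 * in-driver latency of the request.
 */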

static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
	if (!req)
		return NULL;
	memset(req, 0, sizeof(*req));

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
		      | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0)
		      | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}
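
/*
 * Assumed allocation pattern in the submit path (__drbd_make_request is
 * the caller named in the comment above; this sketch is illustrative,
 * not a verbatim copy of that function):
 *
 *	struct drbd_request *req = drbd_req_new(device, bio);
 *	if (!req) {
 *		// hypothetical error path: fail the bio upwards
 *		bio->bi_status = BLK_STS_RESOURCE;
 *		bio_endio(bio);
 *		return;
 *	}
 *
 * The new request starts out with completion_ref == 1 and a single kref;
 * the submit path drops that completion_ref once submission is done.
 */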

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete. */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
		atomic_read(&req->completion_ref) ||
		(s & RQ_LOCAL_PENDING) ||
		((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may still be empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * (resp. block_id verification) interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, &drbd_request_mempool);
}
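
/*
 * Reference counting recap (informational): each drbd_request carries
 *  - completion_ref: how many things still prevent completion towards
 *    the upper layers; when it drops to zero, drbd_req_complete() runs;
 *  - kref: object lifetime, with drbd_req_destroy() above as release
 *    function, e.g.
 *
 *	kref_put(&req->kref, drbd_req_destroy);
 *
 * mod_rq_state() further below is intended to be the only place that
 * manipulates both counters in response to request state transitions.
 */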

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point in closing an epoch if it is empty anyway */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}
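
/*
 * Illustrative call site (mirrors the QUEUE_FOR_NET_WRITE handling in
 * __req_mod() below; both callers in this file already hold
 * resource->req_lock, as required by the comment above):
 *
 *	nc = rcu_dereference(connection->net_conf);
 *	p = nc->max_epoch_size;
 *	if (connection->current_tle_writes >= p)
 *		start_new_tl_epoch(connection);
 *
 * Closing an epoch bumps current_tle_nr, so subsequent writes belong to
 * a new epoch, and wakes the senders so they can send the barrier.
 */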

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	m->bio->bi_status = errno_to_blk_status(m->error);
	bio_endio(m->bio);
	dec_ap_bio(device);
}
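
/*
 * Note on the error translation above (sketch of the block layer
 * mapping, to the best of our knowledge): m->error carries a negative
 * errno, which errno_to_blk_status() converts, e.g.
 *
 *	errno_to_blk_status(0)    == BLK_STS_OK
 *	errno_to_blk_status(-EIO) == BLK_STS_IOERR
 *
 * so the master bio is completed upwards with a block-layer status code.
 */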

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put it the other way,
	 * only report failure when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (op_is_write(bio_op(req->master_bio)) &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * read-ahead may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}
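
/*
 * Success/failure matrix implemented by the "ok" computation above
 * (informational; derived directly from the code):
 *
 *	RQ_LOCAL_OK	RQ_NET_OK	reported to upper layers
 *	set		set		success
 *	set		clear		success (local copy is good)
 *	clear		set		success (peer copy is good)
 *	clear		clear		failure (error ?: -EIO)
 */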

/* still holds resource->req_lock */
static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!put)
		return;

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return;

	drbd_req_complete(req, m);

	/* local completion may still come in later,
	 * we need to keep the req object around. */
	if (req->rq_state & RQ_LOCAL_ABORTED)
		return;

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return;
	}

	kref_put(&req->kref, drbd_req_destroy);
}
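
/*
 * Typical usage (sketch, matching the tail of mod_rq_state() below):
 * callers count how many completion references a state transition
 * released, then drop them all at once:
 *
 *	drbd_req_put_completion_ref(req, m, c_put);
 *	kref_put(&req->kref, drbd_req_destroy);
 *
 * put == 0 is explicitly allowed and is a no-op; if the counter drops to
 * zero, drbd_req_complete() runs, and unless the request was aborted
 * locally or postponed, the corresponding kref is dropped as well.
 */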

static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}
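
/*
 * The helpers above maintain per-connection cache pointers (req_next,
 * req_ack_pending, req_not_net_done) to the oldest request in each state,
 * so the sender path (and, presumably, the request timeout logic) need
 * not scan the whole transfer log.  The intended pattern, as used in
 * mod_rq_state() below (sketch):
 *
 *	set_if_null_req_next(peer_device, req);	   // when RQ_NET_QUEUED is set
 *	...
 *	advance_conn_req_next(peer_device, req);   // when RQ_NET_QUEUED is cleared
 *
 * advance_*() walks forward from the cached request to the next request
 * still matching the respective predicate, or sets the pointer to NULL
 * at the end of the transfer log.
 */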

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	kref_get(&req->kref);

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the ack_receiver thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (req->rq_state & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			kref_put(&req->kref, drbd_req_destroy);
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			kref_put(&req->kref, drbd_req_destroy);
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	drbd_req_put_completion_ref(req, m, c_put);
	kref_put(&req->kref, drbd_req_destroy);
}
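
/*
 * Example transitions, taken from the __req_mod() switch below, to
 * illustrate the clear/set calling convention of mod_rq_state():
 *
 *	mod_rq_state(req, m, 0, RQ_NET_PENDING);		// TO_BE_SENT
 *	mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);	// HANDED_OVER_TO_NETWORK
 *	mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);	// (write) ack received
 *
 * Each call takes a temporary kref, applies the flag changes, acquires
 * or releases the matching completion references, and finally drops the
 * released references, which may complete and even destroy the request.
 */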

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
		  (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
		  (unsigned long long)req->i.sector,
		  req->i.size >> 9,
		  bdevname(device->ldev->backing_bdev, b));
}
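
/*
 * Example of the resulting warning (illustrative values; the usual
 * drbd_warn() prefix identifying the device is omitted):
 *
 *	local WRITE IO error sector 2048+8 on sda
 *
 * i.e. a failed 4 KiB (8 sector) write starting at sector 2048 of the
 * backing device.
 */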

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but no longer RQ_NET_PENDING, the neg-ack
 * was faster (and we must not set RQ_NET_OK). */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}
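
/*
 * Behavior sketch, derived from the flag setup in TO_BE_SENT below:
 * protocol C writes carry RQ_EXP_WRITE_ACK and protocol B writes carry
 * RQ_EXP_RECEIVE_ACK, so for those this helper returns false.  Only a
 * protocol A write that is still RQ_NET_PENDING matches:
 *
 *	req->rq_state = RQ_WRITE | RQ_NET_PENDING;	// protocol A
 *	is_pending_write_protocol_A(req);		// -> true
 *
 *	req->rq_state = RQ_WRITE | RQ_NET_PENDING | RQ_EXP_WRITE_ACK;
 *	is_pending_write_protocol_A(req);		// -> false
 */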

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 * enforces that it is all in this one place, where it is easier to audit,
 * it makes it obvious that whatever "event" "happens" to a request should
 * happen "atomically" within the req_lock,
 * and it enforces that we have to think in a very structured manner
 * about the "events" that may happen to a request during its lifetime ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
	*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_OP_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);

		break;

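	/*
	 * Net effect for a protocol C write (informational summary of the
	 * QUEUE_FOR_NET_WRITE case above): the request now sits in the
	 * write_requests interval tree, carries RQ_NET_PENDING |
	 * RQ_NET_QUEUED | RQ_EXP_BARR_ACK | RQ_EXP_WRITE_ACK, is queued on
	 * the sender work queue with w_send_dblock as callback, and may
	 * just have closed the current epoch if current_tle_writes reached
	 * nc->max_epoch_size.
	 */
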
	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear.  just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
						RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
		/* fall through */
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait.  Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 827 | req->w.cb = w_restart_disk_io; |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 828 | drbd_queue_work(&connection->sender_work, |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 829 | &req->w); |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 830 | break; |
| 831 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 832 | case RESEND: |
Philipp Reisner | 509fc01 | 2012-07-31 11:22:58 +0200 | [diff] [blame] | 833 | /* Simply complete (local only) READs. */ |
| 834 | if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { |
Philipp Reisner | 8a0bab2 | 2012-08-07 13:28:00 +0200 | [diff] [blame] | 835 | mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); |
Philipp Reisner | 509fc01 | 2012-07-31 11:22:58 +0200 | [diff] [blame] | 836 | break; |
| 837 | } |
| 838 | |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 839 | /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 840 | before the connection loss (B&C only); only P_BARRIER_ACK |
| 841 | (or the local completion?) was missing when we suspended. |
Lars Ellenberg | 6870ca6 | 2012-03-26 17:02:45 +0200 | [diff] [blame] | 842 | We throw them out of the TL here by pretending we got a BARRIER_ACK. |
| 843 | During connection handshake, we ensure that the peer was not rebooted. */ |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 844 | if (!(req->rq_state & RQ_NET_OK)) { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 845 | /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync? |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 846 | * in that case we must not set RQ_NET_PENDING. */ |
| 847 | |
| 848 | mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 849 | if (req->w.cb) { |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 850 | /* w.cb expected to be w_send_dblock, or w_send_read_req */ |
| 851 | drbd_queue_work(&connection->sender_work, |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 852 | &req->w); |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 853 | rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 854 | } /* else: FIXME can this happen? */ |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 855 | break; |
| 856 | } |
Gustavo A. R. Silva | e16fb3a | 2019-01-23 00:33:09 -0600 | [diff] [blame] | 857 | /* else, fall through - to BARRIER_ACKED */ |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 858 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 859 | case BARRIER_ACKED: |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 860 | /* barrier ack for READ requests does not make sense */ |
Philipp Reisner | 288f422 | 2010-05-27 15:07:43 +0200 | [diff] [blame] | 861 | if (!(req->rq_state & RQ_WRITE)) |
| 862 | break; |
| 863 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 864 | if (req->rq_state & RQ_NET_PENDING) { |
Andreas Gruenbacher | a209b4a | 2011-08-17 12:43:25 +0200 | [diff] [blame] | 865 | /* barrier came in before all requests were acked. |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 866 | * this is bad, because if the connection is lost now, |
| 867 | * we won't be able to clean them up... */ |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 868 | drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n"); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 869 | } |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 870 | /* Allowed to complete requests, even while suspended. |
| 871 | * As this is called for all requests within a matching epoch, |
| 872 | * we need to filter, and only set RQ_NET_DONE for those that |
| 873 | * have actually been on the wire. */ |
| 874 | mod_rq_state(req, m, RQ_COMPLETION_SUSP, |
| 875 | (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 876 | break; |
| 877 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 878 | case DATA_RECEIVED: |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 879 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 880 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 881 | break; |
Lars Ellenberg | 7074e4a | 2013-03-27 14:08:41 +0100 | [diff] [blame] | 882 | |
| 883 | case QUEUE_AS_DRBD_BARRIER: |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 884 | start_new_tl_epoch(connection); |
Lars Ellenberg | 7074e4a | 2013-03-27 14:08:41 +0100 | [diff] [blame] | 885 | mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); |
| 886 | break; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 887 | } |
Philipp Reisner | 2a80699 | 2010-06-09 14:07:43 +0200 | [diff] [blame] | 888 | |
| 889 | return rv; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 890 | } |
| 891 | |
| 892 | /* we may do a local read if: |
| 893 | * - we are consistent (of course), |
| 894 | * - or we are generally inconsistent, |
| 895 | * BUT we are still/already IN SYNC for this area. |
 | 896 | * Since size may be bigger than BM_BLOCK_SIZE, |
 | 897 | * we may need to check several bitmap bits. |
| 898 | */ |
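/* Worked example (a sketch, assuming DRBD's usual bitmap granularity of
 * 4 KiB per bit, i.e. one bit covers eight 512-byte sectors): a 16 KiB
 * read at sector 8 spans 16384 >> 9 == 32 sectors, so esector = 39;
 * that maps to sbnr = BM_SECT_TO_BIT(8) = 1 and ebnr = BM_SECT_TO_BIT(39) = 4,
 * and the read may be served locally only if bits 1..4 are all clear. */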
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 899 | static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 900 | { |
| 901 | unsigned long sbnr, ebnr; |
| 902 | sector_t esector, nr_sectors; |
| 903 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 904 | if (device->state.disk == D_UP_TO_DATE) |
Andreas Gruenbacher | 0da34df | 2010-12-19 20:48:29 +0100 | [diff] [blame] | 905 | return true; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 906 | if (device->state.disk != D_INCONSISTENT) |
Andreas Gruenbacher | 0da34df | 2010-12-19 20:48:29 +0100 | [diff] [blame] | 907 | return false; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 908 | esector = sector + (size >> 9) - 1; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 909 | nr_sectors = drbd_get_capacity(device->this_bdev); |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 910 | D_ASSERT(device, sector < nr_sectors); |
| 911 | D_ASSERT(device, esector < nr_sectors); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 912 | |
| 913 | sbnr = BM_SECT_TO_BIT(sector); |
| 914 | ebnr = BM_SECT_TO_BIT(esector); |
| 915 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 916 | return drbd_bm_count_bits(device, sbnr, ebnr) == 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 917 | } |
| 918 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 919 | static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector, |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 920 | enum drbd_read_balancing rbm) |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 921 | { |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 922 | struct backing_dev_info *bdi; |
Philipp Reisner | d60de03 | 2011-11-17 10:12:31 +0100 | [diff] [blame] | 923 | int stripe_shift; |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 924 | |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 925 | switch (rbm) { |
| 926 | case RB_CONGESTED_REMOTE: |
Jan Kara | dc3b17c | 2017-02-02 15:56:50 +0100 | [diff] [blame] | 927 | bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 928 | return bdi_read_congested(bdi); |
| 929 | case RB_LEAST_PENDING: |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 930 | return atomic_read(&device->local_cnt) > |
| 931 | atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt); |
Philipp Reisner | d60de03 | 2011-11-17 10:12:31 +0100 | [diff] [blame] | 932 | case RB_32K_STRIPING: /* stripe_shift = 15 */ |
| 933 | case RB_64K_STRIPING: |
| 934 | case RB_128K_STRIPING: |
| 935 | case RB_256K_STRIPING: |
| 936 | case RB_512K_STRIPING: |
| 937 | case RB_1M_STRIPING: /* stripe_shift = 20 */ |
| 938 | stripe_shift = (rbm - RB_32K_STRIPING + 15); |
| 939 | return (sector >> (stripe_shift - 9)) & 1; |
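		/* e.g. RB_64K_STRIPING yields stripe_shift = 16, so the test
		 * becomes (sector >> 7) & 1: every other 128-sector (64 KiB)
		 * stripe of the device is read from the peer. */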
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 940 | case RB_ROUND_ROBIN: |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 941 | return test_and_change_bit(READ_BALANCE_RR, &device->flags); |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 942 | case RB_PREFER_REMOTE: |
| 943 | return true; |
| 944 | case RB_PREFER_LOCAL: |
| 945 | default: |
| 946 | return false; |
| 947 | } |
| 948 | } |
| 949 | |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 950 | /* |
| 951 | * complete_conflicting_writes - wait for any conflicting write requests |
| 952 | * |
| 953 | * The write_requests tree contains all active write requests which we |
| 954 | * currently know about. Wait for any requests to complete which conflict with |
| 955 | * the new one. |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 956 | * |
| 957 | * Only way out: remove the conflicting intervals from the tree. |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 958 | */ |
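/* The sleep/wakeup handshake: we flag the conflicting interval via
 * i->waiting before dropping req_lock; whoever completes that interval
 * (for example the POSTPONE_WRITE handling above) checks ->waiting and
 * wakes device->misc_wait, after which we restart the tree walk. */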
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 959 | static void complete_conflicting_writes(struct drbd_request *req) |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 960 | { |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 961 | DEFINE_WAIT(wait); |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 962 | struct drbd_device *device = req->device; |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 963 | struct drbd_interval *i; |
| 964 | sector_t sector = req->i.sector; |
| 965 | int size = req->i.size; |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 966 | |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 967 | for (;;) { |
Lars Ellenberg | 1b228c9 | 2016-06-14 00:26:17 +0200 | [diff] [blame] | 968 | drbd_for_each_overlap(i, &device->write_requests, sector, size) { |
 | 969 | /* Ignore if already completed to upper layers. */ |
| 970 | if (i->completed) |
| 971 | continue; |
| 972 | /* Handle the first found overlap. After the schedule |
| 973 | * we have to restart the tree walk. */ |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 974 | break; |
Lars Ellenberg | 1b228c9 | 2016-06-14 00:26:17 +0200 | [diff] [blame] | 975 | } |
 | 976 | if (!i) /* no (more) conflicting overlaps found */ |
| 977 | break; |
| 978 | |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 979 | /* Indicate to wake up device->misc_wait on progress. */ |
Lars Ellenberg | 1b228c9 | 2016-06-14 00:26:17 +0200 | [diff] [blame] | 980 | prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE); |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 981 | i->waiting = true; |
Andreas Gruenbacher | 0500813 | 2011-07-07 14:19:42 +0200 | [diff] [blame] | 982 | spin_unlock_irq(&device->resource->req_lock); |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 983 | schedule(); |
Andreas Gruenbacher | 0500813 | 2011-07-07 14:19:42 +0200 | [diff] [blame] | 984 | spin_lock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 985 | } |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 986 | finish_wait(&device->misc_wait, &wait); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 987 | } |
| 988 | |
Fabian Frederick | 7e5fec3 | 2016-06-14 00:26:35 +0200 | [diff] [blame] | 989 | /* called within req_lock */ |
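/* Policy sketch: unless nc->on_congestion is OC_BLOCK (or the peer is too
 * old), exceeding either the nc->cong_fill or the nc->cong_extents
 * threshold closes the current epoch and, per configuration, switches to
 * C_AHEAD (further writes only marked out-of-sync) or disconnects. */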
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 990 | static void maybe_pull_ahead(struct drbd_device *device) |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 991 | { |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 992 | struct drbd_connection *connection = first_peer_device(device)->connection; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 993 | struct net_conf *nc; |
| 994 | bool congested = false; |
| 995 | enum drbd_on_congestion on_congestion; |
| 996 | |
Lars Ellenberg | 607f25e | 2013-03-27 14:08:45 +0100 | [diff] [blame] | 997 | rcu_read_lock(); |
Andreas Gruenbacher | bde89a9 | 2011-05-30 16:32:41 +0200 | [diff] [blame] | 998 | nc = rcu_dereference(connection->net_conf); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 999 | on_congestion = nc ? nc->on_congestion : OC_BLOCK; |
Lars Ellenberg | 607f25e | 2013-03-27 14:08:45 +0100 | [diff] [blame] | 1000 | rcu_read_unlock(); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1001 | if (on_congestion == OC_BLOCK || |
Andreas Gruenbacher | bde89a9 | 2011-05-30 16:32:41 +0200 | [diff] [blame] | 1002 | connection->agreed_pro_version < 96) |
Lars Ellenberg | 3b9ef85 | 2012-07-30 09:06:26 +0200 | [diff] [blame] | 1003 | return; |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1004 | |
Lars Ellenberg | 0c066bc | 2014-03-20 14:04:35 +0100 | [diff] [blame] | 1005 | if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD) |
| 1006 | return; /* nothing to do ... */ |
| 1007 | |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1008 | /* If I don't even have good local storage, we can not reasonably try |
| 1009 | * to pull ahead of the peer. We also need the local reference to make |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1010 | * sure device->act_log is there. |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1011 | */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1012 | if (!get_ldev_if_state(device, D_UP_TO_DATE)) |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1013 | return; |
| 1014 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1015 | if (nc->cong_fill && |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1016 | atomic_read(&device->ap_in_flight) >= nc->cong_fill) { |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1017 | drbd_info(device, "Congestion-fill threshold reached\n"); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1018 | congested = true; |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1019 | } |
| 1020 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1021 | if (device->act_log->used >= nc->cong_extents) { |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1022 | drbd_info(device, "Congestion-extents threshold reached\n"); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1023 | congested = true; |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1024 | } |
| 1025 | |
| 1026 | if (congested) { |
Lars Ellenberg | 99b4d8f | 2012-08-07 06:42:09 +0200 | [diff] [blame] | 1027 | /* start a new epoch for non-mirrored writes */ |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1028 | start_new_tl_epoch(first_peer_device(device)->connection); |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1029 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1030 | if (on_congestion == OC_PULL_AHEAD) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1031 | _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1032 | else /*nc->on_congestion == OC_DISCONNECT */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1033 | _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL); |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1034 | } |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1035 | put_ldev(device); |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1036 | } |
| 1037 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1038 | /* If this returns false, and req->private_bio is still set, |
| 1039 | * this should be submitted locally. |
| 1040 | * |
| 1041 | * If it returns false, but req->private_bio is not set, |
| 1042 | * we do not have access to good data :( |
| 1043 | * |
| 1044 | * Otherwise, this destroys req->private_bio, if any, |
| 1045 | * and returns true. |
| 1046 | */ |
| 1047 | static bool do_remote_read(struct drbd_request *req) |
| 1048 | { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 1049 | struct drbd_device *device = req->device; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1050 | enum drbd_read_balancing rbm; |
| 1051 | |
| 1052 | if (req->private_bio) { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1053 | if (!drbd_may_do_local_read(device, |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1054 | req->i.sector, req->i.size)) { |
| 1055 | bio_put(req->private_bio); |
| 1056 | req->private_bio = NULL; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1057 | put_ldev(device); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1058 | } |
| 1059 | } |
| 1060 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1061 | if (device->state.pdsk != D_UP_TO_DATE) |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1062 | return false; |
| 1063 | |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 1064 | if (req->private_bio == NULL) |
| 1065 | return true; |
| 1066 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1067 | /* TODO: improve read balancing decisions, take into account drbd |
| 1068 | * protocol, pending requests etc. */ |
| 1069 | |
| 1070 | rcu_read_lock(); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1071 | rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1072 | rcu_read_unlock(); |
| 1073 | |
| 1074 | if (rbm == RB_PREFER_LOCAL && req->private_bio) |
| 1075 | return false; /* submit locally */ |
| 1076 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1077 | if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1078 | if (req->private_bio) { |
| 1079 | bio_put(req->private_bio); |
| 1080 | req->private_bio = NULL; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1081 | put_ldev(device); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1082 | } |
| 1083 | return true; |
| 1084 | } |
| 1085 | |
| 1086 | return false; |
| 1087 | } |
| 1088 | |
Andreas Gruenbacher | 2e9ffde | 2014-08-08 17:48:00 +0200 | [diff] [blame] | 1089 | bool drbd_should_do_remote(union drbd_dev_state s) |
| 1090 | { |
| 1091 | return s.pdsk == D_UP_TO_DATE || |
| 1092 | (s.pdsk >= D_INCONSISTENT && |
| 1093 | s.conn >= C_WF_BITMAP_T && |
| 1094 | s.conn < C_AHEAD); |
| 1095 | /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T. |
| 1096 | That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* |
| 1097 | states. */ |
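	/* e.g. a running resync sits inside this window (C_WF_BITMAP_T <=
	 * conn < C_AHEAD), so writes are still fully replicated while
	 * syncing; once we pull ahead (C_AHEAD) this returns false and
	 * drbd_should_send_out_of_sync() below takes over. */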
| 1098 | } |
| 1099 | |
| 1100 | static bool drbd_should_send_out_of_sync(union drbd_dev_state s) |
| 1101 | { |
| 1102 | return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S; |
| 1103 | /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary |
| 1104 | since we enter state C_AHEAD only if proto >= 96 */ |
| 1105 | } |
| 1106 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1107 | /* returns number of connections (== 1, for drbd 8.4) |
| 1108 | * expected to actually write this data, |
| 1109 | * which does NOT include those that we are L_AHEAD for. */ |
| 1110 | static int drbd_process_write_request(struct drbd_request *req) |
| 1111 | { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 1112 | struct drbd_device *device = req->device; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1113 | int remote, send_oos; |
| 1114 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1115 | remote = drbd_should_do_remote(device->state); |
| 1116 | send_oos = drbd_should_send_out_of_sync(device->state); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1117 | |
Lars Ellenberg | 519b6d3e | 2012-08-03 02:19:09 +0200 | [diff] [blame] | 1118 | /* Need to replicate writes. Unless it is an empty flush, |
| 1119 | * which is better mapped to a DRBD P_BARRIER packet, |
| 1120 | * also for drbd wire protocol compatibility reasons. |
| 1121 | * If this was a flush, just start a new epoch. |
| 1122 | * Unless the current epoch was empty anyways, or we are not currently |
| 1123 | * replicating, in which case there is no point. */ |
| 1124 | if (unlikely(req->i.size == 0)) { |
| 1125 | /* The only size==0 bios we expect are empty flushes. */ |
Jens Axboe | 1eff9d3 | 2016-08-05 15:35:16 -0600 | [diff] [blame] | 1126 | D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH); |
Lars Ellenberg | 99b4d8f | 2012-08-07 06:42:09 +0200 | [diff] [blame] | 1127 | if (remote) |
Lars Ellenberg | 7074e4a | 2013-03-27 14:08:41 +0100 | [diff] [blame] | 1128 | _req_mod(req, QUEUE_AS_DRBD_BARRIER); |
| 1129 | return remote; |
Lars Ellenberg | 519b6d3e | 2012-08-03 02:19:09 +0200 | [diff] [blame] | 1130 | } |
| 1131 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1132 | if (!remote && !send_oos) |
| 1133 | return 0; |
| 1134 | |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 1135 | D_ASSERT(device, !(remote && send_oos)); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1136 | |
| 1137 | if (remote) { |
| 1138 | _req_mod(req, TO_BE_SENT); |
| 1139 | _req_mod(req, QUEUE_FOR_NET_WRITE); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1140 | } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size)) |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1141 | _req_mod(req, QUEUE_FOR_SEND_OOS); |
| 1142 | |
| 1143 | return remote; |
| 1144 | } |
| 1145 | |
Lars Ellenberg | f31e583 | 2018-12-20 17:23:42 +0100 | [diff] [blame] | 1146 | static void drbd_process_discard_or_zeroes_req(struct drbd_request *req, int flags) |
Lars Ellenberg | 7435e90 | 2016-06-14 00:26:22 +0200 | [diff] [blame] | 1147 | { |
Lars Ellenberg | f31e583 | 2018-12-20 17:23:42 +0100 | [diff] [blame] | 1148 | int err = drbd_issue_discard_or_zero_out(req->device, |
| 1149 | req->i.sector, req->i.size >> 9, flags); |
| 1150 | if (err) |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1151 | req->private_bio->bi_status = BLK_STS_IOERR; |
Lars Ellenberg | 7435e90 | 2016-06-14 00:26:22 +0200 | [diff] [blame] | 1152 | bio_endio(req->private_bio); |
| 1153 | } |
| 1154 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1155 | static void |
| 1156 | drbd_submit_req_private_bio(struct drbd_request *req) |
| 1157 | { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 1158 | struct drbd_device *device = req->device; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1159 | struct bio *bio = req->private_bio; |
Christoph Hellwig | 7024628 | 2016-07-19 11:28:41 +0200 | [diff] [blame] | 1160 | unsigned int type; |
| 1161 | |
| 1162 | if (bio_op(bio) != REQ_OP_READ) |
| 1163 | type = DRBD_FAULT_DT_WR; |
Jens Axboe | 1eff9d3 | 2016-08-05 15:35:16 -0600 | [diff] [blame] | 1164 | else if (bio->bi_opf & REQ_RAHEAD) |
Christoph Hellwig | 7024628 | 2016-07-19 11:28:41 +0200 | [diff] [blame] | 1165 | type = DRBD_FAULT_DT_RA; |
| 1166 | else |
| 1167 | type = DRBD_FAULT_DT_RD; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1168 | |
Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1169 | bio_set_dev(bio, device->ldev->backing_bdev); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1170 | |
| 1171 | /* State may have changed since we grabbed our reference on the |
| 1172 | * ->ldev member. Double check, and short-circuit to endio. |
| 1173 | * In case the last activity log transaction failed to get on |
| 1174 | * stable storage, and this is a WRITE, we may not even submit |
| 1175 | * this bio. */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1176 | if (get_ldev(device)) { |
Christoph Hellwig | 7024628 | 2016-07-19 11:28:41 +0200 | [diff] [blame] | 1177 | if (drbd_insert_fault(device, type)) |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1178 | bio_io_error(bio); |
Lars Ellenberg | f31e583 | 2018-12-20 17:23:42 +0100 | [diff] [blame] | 1179 | else if (bio_op(bio) == REQ_OP_WRITE_ZEROES) |
| 1180 | drbd_process_discard_or_zeroes_req(req, EE_ZEROOUT | |
| 1181 | ((bio->bi_opf & REQ_NOUNMAP) ? 0 : EE_TRIM)); |
| 1182 | else if (bio_op(bio) == REQ_OP_DISCARD) |
| 1183 | drbd_process_discard_or_zeroes_req(req, EE_TRIM); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1184 | else |
| 1185 | generic_make_request(bio); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1186 | put_ldev(device); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1187 | } else |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1188 | bio_io_error(bio); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1189 | } |
| 1190 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1191 | static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) |
Lars Ellenberg | 779b3fe | 2013-03-19 18:16:54 +0100 | [diff] [blame] | 1192 | { |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1193 | spin_lock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1194 | list_add_tail(&req->tl_requests, &device->submit.writes); |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1195 | list_add_tail(&req->req_pending_master_completion, |
| 1196 | &device->pending_master_completion[1 /* WRITE */]); |
| 1197 | spin_unlock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1198 | queue_work(device->submit.wq, &device->submit.worker); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1199 | /* do_submit() may sleep internally on al_wait, too */ |
| 1200 | wake_up(&device->al_wait); |
Lars Ellenberg | 779b3fe | 2013-03-19 18:16:54 +0100 | [diff] [blame] | 1201 | } |
| 1202 | |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1203 | /* returns the new drbd_request pointer, if the caller is expected to |
| 1204 | * drbd_send_and_submit() it (to save latency), or NULL if we queued the |
| 1205 | * request on the submitter thread. |
| 1206 | * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request. |
| 1207 | */ |
Rashika Kheria | 01cd263 | 2013-12-19 15:12:27 +0530 | [diff] [blame] | 1208 | static struct drbd_request * |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1209 | drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1210 | { |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1211 | const int rw = bio_data_dir(bio); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1212 | struct drbd_request *req; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1213 | |
| 1214 | /* allocate outside of all locks; */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1215 | req = drbd_req_new(device, bio); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1216 | if (!req) { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1217 | dec_ap_bio(device); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1218 | /* Only pass the error to the upper layers. |
 | 1219 | * If the user cannot handle IO errors, that's not our business. */ |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1220 | drbd_err(device, "could not kmalloc() req\n"); |
Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1221 | bio->bi_status = BLK_STS_RESOURCE; |
Christoph Hellwig | 4246a0b | 2015-07-20 15:29:37 +0200 | [diff] [blame] | 1222 | bio_endio(bio); |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1223 | return ERR_PTR(-ENOMEM); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1224 | } |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1225 | req->start_jif = start_jif; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1226 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1227 | if (!get_ldev(device)) { |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1228 | bio_put(req->private_bio); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1229 | req->private_bio = NULL; |
| 1230 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1231 | |
Lars Ellenberg | 7e8c288 | 2013-03-19 18:16:57 +0100 | [diff] [blame] | 1232 | /* Update disk stats */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1233 | _drbd_start_io_acct(device, req); |
Lars Ellenberg | 7e8c288 | 2013-03-19 18:16:57 +0100 | [diff] [blame] | 1234 | |
Lars Ellenberg | 7435e90 | 2016-06-14 00:26:22 +0200 | [diff] [blame] | 1235 | /* process discards always from our submitter thread */ |
Bart Van Assche | fad2d4e | 2018-06-25 15:51:30 -0700 | [diff] [blame] | 1236 | if (bio_op(bio) == REQ_OP_WRITE_ZEROES || |
| 1237 | bio_op(bio) == REQ_OP_DISCARD) |
Lars Ellenberg | 7435e90 | 2016-06-14 00:26:22 +0200 | [diff] [blame] | 1238 | goto queue_for_submitter_thread; |
| 1239 | |
Lars Ellenberg | 519b6d3e | 2012-08-03 02:19:09 +0200 | [diff] [blame] | 1240 | if (rw == WRITE && req->private_bio && req->i.size |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1241 | && !test_bit(AL_SUSPENDED, &device->flags)) { |
Lars Ellenberg | 7435e90 | 2016-06-14 00:26:22 +0200 | [diff] [blame] | 1242 | if (!drbd_al_begin_io_fastpath(device, &req->i)) |
| 1243 | goto queue_for_submitter_thread; |
Philipp Reisner | 0778286 | 2010-08-31 12:00:50 +0200 | [diff] [blame] | 1244 | req->rq_state |= RQ_IN_ACT_LOG; |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1245 | req->in_actlog_jif = jiffies; |
Philipp Reisner | 0778286 | 2010-08-31 12:00:50 +0200 | [diff] [blame] | 1246 | } |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1247 | return req; |
Lars Ellenberg | 7435e90 | 2016-06-14 00:26:22 +0200 | [diff] [blame] | 1248 | |
| 1249 | queue_for_submitter_thread: |
| 1250 | atomic_inc(&device->ap_actlog_cnt); |
| 1251 | drbd_queue_write(device, req); |
| 1252 | return NULL; |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1253 | } |
| 1254 | |
Lars Ellenberg | 0ead5cc | 2016-06-14 00:26:27 +0200 | [diff] [blame] | 1255 | /* Require at least one path to current data. |
| 1256 | * We don't want to allow writes on C_STANDALONE D_INCONSISTENT: |
 | 1257 | * We would not be able to read back what was written, |
| 1258 | * we would not have bumped the data generation uuids, |
| 1259 | * we would cause data divergence for all the wrong reasons. |
| 1260 | * |
| 1261 | * If we don't see at least one D_UP_TO_DATE, we will fail this request, |
| 1262 | * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO, |
| 1263 | * and queues for retry later. |
| 1264 | */ |
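/* e.g. a diskless node with a D_UP_TO_DATE peer may still write, while a
 * node with an inconsistent local disk and no usable peer may not. */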
| 1265 | static bool may_do_writes(struct drbd_device *device) |
| 1266 | { |
| 1267 | const union drbd_dev_state s = device->state; |
| 1268 | return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE; |
| 1269 | } |
| 1270 | |
Lars Ellenberg | c51a0ef | 2017-08-29 10:20:32 +0200 | [diff] [blame] | 1271 | struct drbd_plug_cb { |
| 1272 | struct blk_plug_cb cb; |
| 1273 | struct drbd_request *most_recent_req; |
| 1274 | /* do we need more? */ |
| 1275 | }; |
| 1276 | |
| 1277 | static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule) |
| 1278 | { |
| 1279 | struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb); |
| 1280 | struct drbd_resource *resource = plug->cb.data; |
| 1281 | struct drbd_request *req = plug->most_recent_req; |
| 1282 | |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1283 | kfree(cb); |
Lars Ellenberg | c51a0ef | 2017-08-29 10:20:32 +0200 | [diff] [blame] | 1284 | if (!req) |
| 1285 | return; |
| 1286 | |
| 1287 | spin_lock_irq(&resource->req_lock); |
| 1288 | /* In case the sender did not process it yet, raise the flag to |
| 1289 | * have it followed with P_UNPLUG_REMOTE just after. */ |
| 1290 | req->rq_state |= RQ_UNPLUG; |
| 1291 | /* but also queue a generic unplug */ |
| 1292 | drbd_queue_unplug(req->device); |
Lars Ellenberg | c51a0ef | 2017-08-29 10:20:32 +0200 | [diff] [blame] | 1293 | kref_put(&req->kref, drbd_req_destroy); |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1294 | spin_unlock_irq(&resource->req_lock); |
Lars Ellenberg | c51a0ef | 2017-08-29 10:20:32 +0200 | [diff] [blame] | 1295 | } |
| 1296 | |
| 1297 | static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource) |
| 1298 | { |
| 1299 | /* A lot of text to say |
| 1300 | * return (struct drbd_plug_cb*)blk_check_plugged(); */ |
| 1301 | struct drbd_plug_cb *plug; |
| 1302 | struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug)); |
| 1303 | |
| 1304 | if (cb) |
| 1305 | plug = container_of(cb, struct drbd_plug_cb, cb); |
| 1306 | else |
| 1307 | plug = NULL; |
| 1308 | return plug; |
| 1309 | } |
| 1310 | |
| 1311 | static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req) |
| 1312 | { |
| 1313 | struct drbd_request *tmp = plug->most_recent_req; |
| 1314 | /* Will be sent to some peer. |
| 1315 | * Remember to tag it with UNPLUG_REMOTE on unplug */ |
| 1316 | kref_get(&req->kref); |
| 1317 | plug->most_recent_req = req; |
| 1318 | if (tmp) |
| 1319 | kref_put(&tmp->kref, drbd_req_destroy); |
| 1320 | } |
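/* Together, drbd_check_plugged() and drbd_update_plug() track the most
 * recent replicated request of the current blk_plug; when the block layer
 * flushes the plug, drbd_unplug() tags just that request with RQ_UNPLUG
 * and queues a generic unplug, so a single P_UNPLUG_REMOTE trails the
 * whole batch on the wire. */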
| 1321 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1322 | static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1323 | { |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1324 | struct drbd_resource *resource = device->resource; |
Christoph Hellwig | 7024628 | 2016-07-19 11:28:41 +0200 | [diff] [blame] | 1325 | const int rw = bio_data_dir(req->master_bio); |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1326 | struct bio_and_error m = { NULL, }; |
| 1327 | bool no_remote = false; |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1328 | bool submit_private_bio = false; |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1329 | |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1330 | spin_lock_irq(&resource->req_lock); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 1331 | if (rw == WRITE) { |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 1332 | /* This may temporarily give up the req_lock, |
 | 1333 | * but will re-acquire it before it returns here. |
| 1334 | * Needs to be before the check on drbd_suspended() */ |
| 1335 | complete_conflicting_writes(req); |
Lars Ellenberg | 607f25e | 2013-03-27 14:08:45 +0100 | [diff] [blame] | 1336 | /* no more giving up req_lock from now on! */ |
| 1337 | |
| 1338 | /* check for congestion, and potentially stop sending |
| 1339 | * full data updates, but start sending "dirty bits" only. */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1340 | maybe_pull_ahead(device); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1341 | } |
| 1342 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1343 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1344 | if (drbd_suspended(device)) { |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1345 | /* push back and retry: */ |
| 1346 | req->rq_state |= RQ_POSTPONED; |
| 1347 | if (req->private_bio) { |
| 1348 | bio_put(req->private_bio); |
| 1349 | req->private_bio = NULL; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1350 | put_ldev(device); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1351 | } |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1352 | goto out; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1353 | } |
| 1354 | |
Christoph Hellwig | 7024628 | 2016-07-19 11:28:41 +0200 | [diff] [blame] | 1355 | /* We fail READs early if we cannot serve them. |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1356 | * We must do this before req is registered on any lists. |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 1357 | * Otherwise, drbd_req_complete() will queue failed READ for retry. */ |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1358 | if (rw != WRITE) { |
| 1359 | if (!do_remote_read(req) && !req->private_bio) |
| 1360 | goto nodata; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1361 | } |
| 1362 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1363 | /* which transfer log epoch does this belong to? */ |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1364 | req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr); |
Philipp Reisner | 288f422 | 2010-05-27 15:07:43 +0200 | [diff] [blame] | 1365 | |
Lars Ellenberg | 227f052 | 2012-07-31 09:31:11 +0200 | [diff] [blame] | 1366 | /* no point in adding empty flushes to the transfer log, |
| 1367 | * they are mapped to drbd barriers already. */ |
Lars Ellenberg | 99b4d8f | 2012-08-07 06:42:09 +0200 | [diff] [blame] | 1368 | if (likely(req->i.size != 0)) { |
| 1369 | if (rw == WRITE) |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1370 | first_peer_device(device)->connection->current_tle_writes++; |
Philipp Reisner | 288f422 | 2010-05-27 15:07:43 +0200 | [diff] [blame] | 1371 | |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1372 | list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1373 | } |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 1374 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1375 | if (rw == WRITE) { |
Lars Ellenberg | 0ead5cc | 2016-06-14 00:26:27 +0200 | [diff] [blame] | 1376 | if (req->private_bio && !may_do_writes(device)) { |
| 1377 | bio_put(req->private_bio); |
| 1378 | req->private_bio = NULL; |
| 1379 | put_ldev(device); |
| 1380 | goto nodata; |
| 1381 | } |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1382 | if (!drbd_process_write_request(req)) |
| 1383 | no_remote = true; |
| 1384 | } else { |
| 1385 | /* We either have a private_bio, or we can read from remote. |
| 1386 | * Otherwise we had done the goto nodata above. */ |
| 1387 | if (req->private_bio == NULL) { |
| 1388 | _req_mod(req, TO_BE_SENT); |
| 1389 | _req_mod(req, QUEUE_FOR_NET_READ); |
Lars Ellenberg | 6719fb0 | 2010-10-18 23:04:07 +0200 | [diff] [blame] | 1390 | } else |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1391 | no_remote = true; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1392 | } |
| 1393 | |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1394 | if (!no_remote) { |
| 1395 | struct drbd_plug_cb *plug = drbd_check_plugged(resource); |
| 1396 | if (plug) |
| 1397 | drbd_update_plug(plug, req); |
| 1398 | } |
Lars Ellenberg | c51a0ef | 2017-08-29 10:20:32 +0200 | [diff] [blame] | 1399 | |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1400 | /* If it took the fast path in drbd_request_prepare, add it here. |
| 1401 | * The slow path has added it already. */ |
| 1402 | if (list_empty(&req->req_pending_master_completion)) |
| 1403 | list_add_tail(&req->req_pending_master_completion, |
| 1404 | &device->pending_master_completion[rw == WRITE]); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1405 | if (req->private_bio) { |
| 1406 | /* needs to be marked within the same spinlock */ |
Lars Ellenberg | 05cbbb3 | 2015-01-16 17:41:55 +0100 | [diff] [blame] | 1407 | req->pre_submit_jif = jiffies; |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1408 | list_add_tail(&req->req_pending_local, |
| 1409 | &device->pending_completion[rw == WRITE]); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1410 | _req_mod(req, TO_BE_SUBMITTED); |
| 1411 | /* but we need to give up the spinlock to submit */ |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1412 | submit_private_bio = true; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1413 | } else if (no_remote) { |
| 1414 | nodata: |
| 1415 | if (__ratelimit(&drbd_ratelimit_state)) |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1416 | drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n", |
Lars Ellenberg | 42839f6 | 2012-09-27 15:19:38 +0200 | [diff] [blame] | 1417 | (unsigned long long)req->i.sector, req->i.size >> 9); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1418 | /* A write may have been queued for send_oos, however. |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 1419 | * So we can not simply free it, we must go through drbd_req_put_completion_ref() */ |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1420 | } |
| 1421 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1422 | out: |
Lars Ellenberg | a00ebd1 | 2017-05-11 10:21:46 +0200 | [diff] [blame] | 1423 | drbd_req_put_completion_ref(req, &m, 1); |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1424 | spin_unlock_irq(&resource->req_lock); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1425 | |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1426 | /* Even though above is a kref_put(), this is safe. |
| 1427 | * As long as we still need to submit our private bio, |
| 1428 | * we hold a completion ref, and the request cannot disappear. |
| 1429 | * If however this request did not even have a private bio to submit |
| 1430 | * (e.g. remote read), req may already be invalid now. |
| 1431 | * That's why we cannot check on req->private_bio. */ |
| 1432 | if (submit_private_bio) |
| 1433 | drbd_submit_req_private_bio(req); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1434 | if (m.bio) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1435 | complete_master_bio(device, &m); |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1436 | } |
| 1437 | |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1438 | void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif) |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1439 | { |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1440 | struct drbd_request *req = drbd_request_prepare(device, bio, start_jif); |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1441 | if (IS_ERR_OR_NULL(req)) |
| 1442 | return; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1443 | drbd_send_and_submit(device, req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1444 | } |
| 1445 | |
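/* Submit, without blocking on the activity log, everything on @incoming
 * that can go right now: typically reads, empty flushes, and writes whose
 * AL extent is already hot (the fastpath succeeds). Writes that would need
 * a cold AL extent stay on @incoming for the caller to batch. */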
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1446 | static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1447 | { |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1448 | struct blk_plug plug; |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1449 | struct drbd_request *req, *tmp; |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1450 | |
| 1451 | blk_start_plug(&plug); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1452 | list_for_each_entry_safe(req, tmp, incoming, tl_requests) { |
| 1453 | const int rw = bio_data_dir(req->master_bio); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1454 | |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1455 | if (rw == WRITE /* rw != WRITE should not even end up here! */ |
| 1456 | && req->private_bio && req->i.size |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1457 | && !test_bit(AL_SUSPENDED, &device->flags)) { |
| 1458 | if (!drbd_al_begin_io_fastpath(device, &req->i)) |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1459 | continue; |
| 1460 | |
| 1461 | req->rq_state |= RQ_IN_ACT_LOG; |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1462 | req->in_actlog_jif = jiffies; |
Lars Ellenberg | ad3fee7 | 2013-12-20 11:22:13 +0100 | [diff] [blame] | 1463 | atomic_dec(&device->ap_actlog_cnt); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1464 | } |
| 1465 | |
| 1466 | list_del_init(&req->tl_requests); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1467 | drbd_send_and_submit(device, req); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1468 | } |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1469 | blk_finish_plug(&plug); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1470 | } |
| 1471 | |
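/* Sort @incoming: requests whose AL slot could be reserved move to
 * @pending (submit after the next transaction commit), requests whose
 * extent is currently locked out (e.g. by resync) move to @later, and
 * once the transaction is full (-ENOBUFS) the rest stays on @incoming. */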
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1472 | static bool prepare_al_transaction_nonblock(struct drbd_device *device, |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1473 | struct list_head *incoming, |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1474 | struct list_head *pending, |
| 1475 | struct list_head *later) |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1476 | { |
Lars Ellenberg | 9da10e8 | 2017-08-29 10:20:33 +0200 | [diff] [blame] | 1477 | struct drbd_request *req; |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1478 | int wake = 0; |
| 1479 | int err; |
| 1480 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1481 | spin_lock_irq(&device->al_lock); |
Lars Ellenberg | 9da10e8 | 2017-08-29 10:20:33 +0200 | [diff] [blame] | 1482 | while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1483 | err = drbd_al_begin_io_nonblock(device, &req->i); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1484 | if (err == -ENOBUFS) |
| 1485 | break; |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1486 | if (err == -EBUSY) |
| 1487 | wake = 1; |
| 1488 | if (err) |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1489 | list_move_tail(&req->tl_requests, later); |
| 1490 | else |
| 1491 | list_move_tail(&req->tl_requests, pending); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1492 | } |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1493 | spin_unlock_irq(&device->al_lock); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1494 | if (wake) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1495 | wake_up(&device->al_wait); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1496 | return !list_empty(pending); |
| 1497 | } |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1498 | |
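/* To be called once the activity log transaction covering @pending has
 * been committed: mark the requests as being in the AL, drop them from
 * the actlog backlog count, and ship them out. */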
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1499 | static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1500 | { |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1501 | struct blk_plug plug; |
Lars Ellenberg | 9da10e8 | 2017-08-29 10:20:33 +0200 | [diff] [blame] | 1502 | struct drbd_request *req; |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1503 | |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1504 | blk_start_plug(&plug); |
Lars Ellenberg | 9da10e8 | 2017-08-29 10:20:33 +0200 | [diff] [blame] | 1505 | while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) { |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1506 | req->rq_state |= RQ_IN_ACT_LOG; |
| 1507 | req->in_actlog_jif = jiffies; |
| 1508 | atomic_dec(&device->ap_actlog_cnt); |
| 1509 | list_del_init(&req->tl_requests); |
| 1510 | drbd_send_and_submit(device, req); |
| 1511 | } |
Lars Ellenberg | de6978b | 2017-08-29 10:20:34 +0200 | [diff] [blame] | 1512 | blk_finish_plug(&plug); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1513 | } |
| 1514 | |
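/* Submitter worker: drain device->submit.writes and batch deferred writes
 * so that many requests share one activity log transaction; fast-path what
 * can go immediately, collect the rest on "pending" until a transaction
 * can be committed. The comments in the loop below cover the starvation
 * corner cases. */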
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1515 | void do_submit(struct work_struct *ws) |
| 1516 | { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1517 | struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1518 | LIST_HEAD(incoming); /* from drbd_make_request() */ |
| 1519 | LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */ |
| 1520 | LIST_HEAD(busy); /* blocked by resync requests */ |
| 1521 | |
| 1522 | /* grab new incoming requests */ |
| 1523 | spin_lock_irq(&device->resource->req_lock); |
| 1524 | list_splice_tail_init(&device->submit.writes, &incoming); |
| 1525 | spin_unlock_irq(&device->resource->req_lock); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1526 | |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1527 | for (;;) { |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1528 | DEFINE_WAIT(wait); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1529 | |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1530 | /* move used-to-be-busy back to front of incoming */ |
| 1531 | list_splice_init(&busy, &incoming); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1532 | submit_fast_path(device, &incoming); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1533 | if (list_empty(&incoming)) |
| 1534 | break; |
| 1535 | |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1536 | for (;;) { |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1537 | prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE); |
| 1538 | |
| 1539 | list_splice_init(&busy, &incoming); |
| 1540 | prepare_al_transaction_nonblock(device, &incoming, &pending, &busy); |
| 1541 | if (!list_empty(&pending)) |
| 1542 | break; |
| 1543 | |
| 1544 | schedule(); |
| 1545 | |
| 1546 | /* If all currently "hot" activity log extents are kept busy by |
| 1547 | * incoming requests, we still must not totally starve new |
| 1548 | * requests to "cold" extents. |
| 1549 | * Something left on &incoming means there had not been |
| 1550 | * enough update slots available, and the activity log |
| 1551 | * has been marked as "starving". |
| 1552 | * |
| 1553 | * Try again now, without looking for new requests, |
| 1554 | * effectively blocking all new requests until we made |
| 1555 | * at least _some_ progress with what we currently have. |
| 1556 | */ |
| 1557 | if (!list_empty(&incoming)) |
| 1558 | continue; |
| 1559 | |
| 1560 | /* Nothing moved to pending, but nothing left |
| 1561 | * on incoming: all moved to busy! |
| 1562 | * Grab new and iterate. */ |
| 1563 | spin_lock_irq(&device->resource->req_lock); |
| 1564 | list_splice_tail_init(&device->submit.writes, &incoming); |
| 1565 | spin_unlock_irq(&device->resource->req_lock); |
| 1566 | } |
| 1567 | finish_wait(&device->al_wait, &wait); |
| 1568 | |
| 1569 | /* If the transaction was full before all incoming requests |
| 1570 | * had been processed, skip ahead to commit, and iterate |
| 1571 | * without splicing in more incoming requests from upper layers. |
| 1572 | * |
| 1573 | * Else, if all incoming have been processed, |
| 1574 | * they have become either "pending" (to be submitted after |
| 1575 | * next transaction commit) or "busy" (blocked by resync). |
| 1576 | * |
| 1577 | * Maybe more was queued while we prepared the transaction? |
| 1578 | * Try to stuff those into this transaction as well. |
| 1579 | * Be strictly non-blocking here; |
| 1580 | * we already have something to commit. |
| 1581 | * |
| 1582 | * Commit if we don't make any more progress. |
| 1583 | */ |
| 1584 | |
| 1585 | while (list_empty(&incoming)) { |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1586 | LIST_HEAD(more_pending); |
| 1587 | LIST_HEAD(more_incoming); |
| 1588 | bool made_progress; |
| 1589 | |
| 1590 | /* It is ok to look outside the lock, |
| 1591 | * it's only an optimization anyway */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1592 | if (list_empty(&device->submit.writes)) |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1593 | break; |
| 1594 | |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1595 | spin_lock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1596 | list_splice_tail_init(&device->submit.writes, &more_incoming); |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1597 | spin_unlock_irq(&device->resource->req_lock); |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1598 | |
| 1599 | if (list_empty(&more_incoming)) |
| 1600 | break; |
| 1601 | |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1602 | made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy); |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1603 | |
| 1604 | list_splice_tail_init(&more_pending, &pending); |
| 1605 | list_splice_tail_init(&more_incoming, &incoming); |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1606 | if (!made_progress) |
| 1607 | break; |
| 1608 | } |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1609 | |
Lars Ellenberg | 4dd726f | 2014-02-11 11:15:36 +0100 | [diff] [blame] | 1610 | drbd_al_begin_io_commit(device); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame] | 1611 | send_and_submit_pending(device, &pending); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1612 | } |
| 1613 | } |
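/*
 * Editorial summary (not part of drbd_req.c): do_submit() above, reduced
 * to pseudocode. One activity-log transaction is committed per batch, and
 * the whole batch is submitted under a single blk_plug:
 *
 *	splice submit.writes onto incoming;
 *	for (;;) {
 *		move busy back onto incoming and submit_fast_path();
 *		if (incoming is empty)
 *			break;
 *		wait on al_wait until prepare_al_transaction_nonblock()
 *			moves at least one request to pending
 *			(the rest land on busy);
 *		opportunistically triage newly queued writes, non-blocking,
 *			as long as that makes progress;
 *		drbd_al_begin_io_commit();	// one transaction per batch
 *		send_and_submit_pending();	// under one blk_plug
 *	}
 */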
| 1614 | |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 1615 | blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1616 | { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1617 | struct drbd_device *device = (struct drbd_device *) q->queuedata; |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1618 | unsigned long start_jif; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1619 | |
NeilBrown | af67c31 | 2017-06-18 14:38:57 +1000 | [diff] [blame] | 1620 | blk_queue_split(q, &bio); |
Kent Overstreet | 54efd50 | 2015-04-23 22:37:18 -0700 | [diff] [blame] | 1621 | |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1622 | start_jif = jiffies; |
Philipp Reisner | aeda1cd6 | 2010-11-09 17:45:06 +0100 | [diff] [blame] | 1623 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1624 | /* |
| 1625 | * what we "blindly" assume: |
| 1626 | */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 1627 | D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1628 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1629 | inc_ap_bio(device); |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1630 | __drbd_make_request(device, bio, start_jif); |
Jens Axboe | dece163 | 2015-11-05 10:41:16 -0700 | [diff] [blame] | 1631 | return BLK_QC_T_NONE; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1632 | } |
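/*
 * Editorial note (not part of drbd_req.c): in this kernel era,
 * drbd_make_request() is registered as the queue's make_request_fn (see
 * blk_queue_make_request() in drbd_main.c), so every bio aimed at the
 * DRBD virtual disk passes through here. blk_queue_split() first splits
 * bios that exceed the queue limits; returning BLK_QC_T_NONE means no
 * polling cookie is handed back to the caller.
 */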
| 1633 | |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1634 | static bool net_timeout_reached(struct drbd_request *net_req, |
| 1635 | struct drbd_connection *connection, |
| 1636 | unsigned long now, unsigned long ent, |
| 1637 | unsigned int ko_count, unsigned int timeout) |
| 1638 | { |
| 1639 | struct drbd_device *device = net_req->device; |
| 1640 | |
| 1641 | if (!time_after(now, net_req->pre_send_jif + ent)) |
| 1642 | return false; |
| 1643 | |
| 1644 | if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) |
| 1645 | return false; |
| 1646 | |
| 1647 | if (net_req->rq_state & RQ_NET_PENDING) { |
| 1648 | drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n", |
| 1649 | jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout); |
| 1650 | return true; |
| 1651 | } |
| 1652 | |
| 1653 | /* We received an ACK already (or are using protocol A), |
| 1654 | * but are waiting for the epoch closing barrier ack. |
| 1655 | * Check if we sent the barrier already. We should not blame the peer |
| 1656 | * for being unresponsive, if we did not even ask it yet. */ |
| 1657 | if (net_req->epoch == connection->send.current_epoch_nr) { |
| 1658 | drbd_warn(device, |
| 1659 | "We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n", |
| 1660 | jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout); |
| 1661 | return false; |
| 1662 | } |
| 1663 | |
| 1664 | /* Worst case: we may have been blocked for whatever reason, then |
| 1665 | * suddenly are able to send a lot of requests (and epoch separating |
| 1666 | * barriers) in quick succession. |
| 1667 | * The timestamp of the net_req may be much too old and not correspond |
| 1668 | * to the sending time of the relevant unack'ed barrier packet, so |
| 1669 | * would trigger a spurious timeout. The latest barrier packet may |
| 1670 | * have a timestamp too recent to trigger the timeout, so we could |
| 1671 | * potentially miss one. Right now we don't have a place to |
| 1672 | * conveniently store these timestamps. |
| 1673 | * But in this particular situation, the application requests are still |
| 1674 | * completed to upper layers, DRBD should still "feel" responsive. |
| 1675 | * No need yet to kill this connection, it may still recover. |
| 1676 | * If not, eventually we will have queued enough into the network for |
| 1677 | * us to block. From that point of view, the timestamp of the last sent |
| 1678 | * barrier packet is relevant enough. |
| 1679 | */ |
| 1680 | if (time_after(now, connection->send.last_sent_barrier_jif + ent)) { |
| 1681 | drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n", |
| 1682 | connection->send.last_sent_barrier_jif, now, |
| 1683 | jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout); |
| 1684 | return true; |
| 1685 | } |
| 1686 | return false; |
| 1687 | } |
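/*
 * Editorial worked example (not from the source): with the shipped
 * defaults timeout = 60 (units of 0.1s, i.e. 6 seconds) and ko-count = 7,
 * the effective timeout computed by the caller is
 *
 *	ent = timeout * HZ/10 * ko_count  =  6 s * 7  =  42 s (in jiffies).
 *
 * A request that is still RQ_NET_PENDING 42 seconds after pre_send_jif,
 * with no reconnect inside that window, makes net_timeout_reached()
 * return true, and the connection is then forced to C_TIMEOUT.
 */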
| 1688 | |
| 1689 | /* A request is considered timed out, if |
| 1690 | * - we have some effective timeout from the configuration, |
| 1691 | * with some state restrictions applied, |
| 1692 | * - the oldest request is waiting for a response from the network |
| 1693 | * resp. the local disk, |
| 1694 | * - the oldest request is in fact older than the effective timeout, |
| 1695 | * - the connection was established (resp. disk was attached) |
| 1696 | * for longer than the timeout already. |
| 1697 | * Note that for 32bit jiffies and very stable connections/disks, |
| 1698 | * we may have a wrap around, which is caught by |
| 1699 | * !time_in_range(now, last_..._jif, last_..._jif + timeout). |
| 1700 | * |
| 1701 | * Side effect: once per 32bit wrap-around interval, which means every |
| 1702 | * ~198 days with 250 HZ, we have a window where the timeout would need |
| 1703 | * to expire twice (worst case) to become effective. Good enough. |
| 1704 | */ |
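/*
 * Editorial sketch (not part of drbd_req.c): a userspace model of the
 * wrap-safe jiffies comparisons relied on above. time_after() and
 * time_in_range() are defined via signed subtraction, so they keep
 * working across a counter wrap; names prefixed model_ are hypothetical.
 */
#include <assert.h>

static int model_time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;		/* a strictly after b */
}

static int model_time_in_range(unsigned long t, unsigned long lo, unsigned long hi)
{
	return !model_time_after(lo, t) &&	/* lo <= t */
	       !model_time_after(t, hi);	/* t <= hi */
}

int main(void)
{
	unsigned long near_wrap = (unsigned long)-10;

	/* 20 ticks later the counter has wrapped around to 10: */
	assert(model_time_after(near_wrap + 20, near_wrap));
	assert(model_time_in_range(near_wrap + 5, near_wrap, near_wrap + 20));
	return 0;
}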
| 1705 | |
Kees Cook | 2bccef3 | 2017-10-17 20:33:01 -0700 | [diff] [blame] | 1706 | void request_timer_fn(struct timer_list *t) |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1707 | { |
Kees Cook | 2bccef3 | 2017-10-17 20:33:01 -0700 | [diff] [blame] | 1708 | struct drbd_device *device = from_timer(device, t, request_timer); |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1709 | struct drbd_connection *connection = first_peer_device(device)->connection; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1710 | struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */ |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1711 | struct net_conf *nc; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1712 | unsigned long oldest_submit_jif; |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1713 | unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1714 | unsigned long now; |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1715 | unsigned int ko_count = 0, timeout = 0; |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1716 | |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1717 | rcu_read_lock(); |
Andreas Gruenbacher | bde89a9 | 2011-05-30 16:32:41 +0200 | [diff] [blame] | 1718 | nc = rcu_dereference(connection->net_conf); |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1719 | if (nc && device->state.conn >= C_WF_REPORT_PARAMS) { |
| 1720 | ko_count = nc->ko_count; |
| 1721 | timeout = nc->timeout; |
| 1722 | } |
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 1723 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1724 | if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */ |
| 1725 | dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10; |
| 1726 | put_ldev(device); |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1727 | } |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1728 | rcu_read_unlock(); |
| 1729 | |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1730 | |
| 1731 | ent = timeout * HZ/10 * ko_count; |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1732 | et = min_not_zero(dt, ent); |
| 1733 | |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1734 | if (!et) |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1735 | return; /* Recurring timer stopped */ |
| 1736 | |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1737 | now = jiffies; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1738 | nt = now + et; |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1739 | |
Andreas Gruenbacher | 0500813 | 2011-07-07 14:19:42 +0200 | [diff] [blame] | 1740 | spin_lock_irq(&device->resource->req_lock); |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1741 | req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local); |
| 1742 | req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local); |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1743 | |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1744 | /* Maybe the oldest request waiting for the peer is in fact still |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1745 | * blocking in tcp sendmsg. That's ok, though: that's handled via the |
| 1746 | * socket send timeout, requesting a ping, and bumping ko-count in |
| 1747 | * we_should_drop_the_connection(). |
| 1748 | */ |
| 1749 | |
| 1750 | /* check the oldest request we successfully sent, |
| 1751 | * but which is still waiting for an ACK. */ |
| 1752 | req_peer = connection->req_ack_pending; |
| 1753 | |
| 1754 | /* if we don't have such a request (e.g. protocol A), |
| 1755 | * check the oldest request which is still waiting on its epoch |
| 1756 | * closing barrier ack. */ |
| 1757 | if (!req_peer) |
| 1758 | req_peer = connection->req_not_net_done; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1759 | |
| 1760 | /* evaluate the oldest peer request only in one timer! */ |
| 1761 | if (req_peer && req_peer->device != device) |
| 1762 | req_peer = NULL; |
| 1763 | |
| 1764 | /* do we have something to evaluate? */ |
| 1765 | if (req_peer == NULL && req_write == NULL && req_read == NULL) |
| 1766 | goto out; |
| 1767 | |
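	/* Editorial note: of the oldest pending local read and write, take
	 * the older submit timestamp; "now" below encodes "no local I/O
	 * pending", which disables the disk-timeout check further down. */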
| 1768 | oldest_submit_jif = |
| 1769 | (req_write && req_read) |
| 1770 | ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif) |
| 1771 | ? req_write->pre_submit_jif : req_read->pre_submit_jif ) |
| 1772 | : req_write ? req_write->pre_submit_jif |
| 1773 | : req_read ? req_read->pre_submit_jif : now; |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1774 | |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1775 | if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout)) |
Philipp Reisner | 9581f97 | 2014-11-10 17:21:14 +0100 | [diff] [blame] | 1776 | _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD); |
Lars Ellenberg | 84d34f2 | 2015-02-19 13:54:11 +0100 | [diff] [blame] | 1777 | |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1778 | if (dt && oldest_submit_jif != now && |
| 1779 | time_after(now, oldest_submit_jif + dt) && |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1780 | !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1781 | drbd_warn(device, "Local backing device failed to meet the disk-timeout\n"); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1782 | __drbd_chk_io_error(device, DRBD_FORCE_DETACH); |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1783 | } |
Lars Ellenberg | 0853546 | 2014-04-28 18:43:31 +0200 | [diff] [blame] | 1784 | |
| 1785 | /* Reschedule the timer for the nearest timeout that has not already expired. |
| 1786 | * Fall back to now + min(effective network timeout, disk timeout). */ |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1787 | ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent)) |
| 1788 | ? req_peer->pre_send_jif + ent : now + et; |
| 1789 | dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt)) |
| 1790 | ? oldest_submit_jif + dt : now + et; |
Lars Ellenberg | 0853546 | 2014-04-28 18:43:31 +0200 | [diff] [blame] | 1791 | nt = time_before(ent, dt) ? ent : dt; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1792 | out: |
Andreas Gruenbacher | 8d4ba3f | 2014-09-11 14:29:08 +0200 | [diff] [blame] | 1793 | spin_unlock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1794 | mod_timer(&device->request_timer, nt); |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1795 | } |
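/*
 * Editorial sketch (not part of drbd_req.c): the timer_list API pairing
 * used above. The timer itself is armed elsewhere (the setup lives in
 * drbd_main.c); shown here only to illustrate how from_timer() recovers
 * the containing structure in request_timer_fn():
 *
 *	timer_setup(&device->request_timer, request_timer_fn, 0);
 *	mod_timer(&device->request_timer, jiffies + HZ);
 */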