/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"


static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	const int rw = bio_data_dir(req->master_bio);
	int cpu;
	cpu = part_stat_lock();
	part_round_stats(cpu, &device->vdisk->part0);
	part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
	(void) cpu; /* The macro invocations above want the cpu argument, I do not like
		       the compiler warning about cpu only assigned but never used... */
	part_inc_in_flight(&device->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_jif;
	int cpu;
	cpu = part_stat_lock();
	part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &device->vdisk->part0);
	part_dec_in_flight(&device->vdisk->part0, rw);
	part_stat_unlock();
}

static struct drbd_request *drbd_req_new(struct drbd_device *device,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete.  */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

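/* Release function for req->kref: passed to kref_put()/kref_sub() and run
 * once the last reference to the request is dropped (see the kref_sub()
 * call at the end of mod_rq_state() below). */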
void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
		atomic_read(&req->completion_ref) ||
		(s & RQ_LOCAL_PENDING) ||
		((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
				s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may be still empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * respective block_id verification interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					  "but my Disk seems to have failed :(\n",
					  (unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}

static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch if it is empty, anyway. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}
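
/* A sketch of typical use of start_new_tl_epoch(), mirroring the
 * QUEUE_FOR_NET_WRITE handling in __req_mod() further down: with
 * resource->req_lock held, close the current epoch once it has grown
 * past the configured limit:
 *
 *	if (connection->current_tle_writes >= nc->max_epoch_size)
 *		start_new_tl_epoch(connection);
 */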

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(device);
}

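/* How the pieces above fit together (a sketch, not a verbatim caller):
 * __req_mod() is invoked with resource->req_lock held and fills in a
 * struct bio_and_error; once the lock is dropped, a non-NULL m.bio is
 * handed to complete_master_bio():
 *
 *	struct bio_and_error m;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&device->resource->req_lock, flags);
 *	__req_mod(req, what, &m);
 *	spin_unlock_irqrestore(&device->resource->req_lock, flags);
 *	if (m.bio)
 *		complete_master_bio(device, &m);
 */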

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int rw;
	int error, ok;

	/* we must not complete the master bio, while it is
	 *	still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 *	not yet acknowledged by the peer
	 *	not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 *	the worker,
	 *	the receiver,
	 *	the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	rw = bio_rw(req->master_bio);

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number.  If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (rw == WRITE &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * READA may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok && rw == READ && !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}

/* still holds resource->req_lock */
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return 0;

	drbd_req_complete(req, m);

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return 0;
	}

	return 1;
}

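/* The helpers below maintain per-connection cursors into the transfer log
 * (req_next, req_ack_pending, req_not_net_done).  Judging from the code in
 * this file, each cursor caches the oldest request still in the respective
 * state: queued but not yet sent, sent but still waiting for an ack, and
 * sent but not yet "net done".  mod_rq_state() sets a cursor when a request
 * enters such a state and advances it again when the request leaves it, so
 * other code can find these requests without rescanning the transfer log. */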
static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;
	int k_put = 0;

	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the asender thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (s & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		/* local completion may still come in later,
		 * we need to keep the req object around. */
		kref_get(&req->kref);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			++k_put;
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			++k_put;
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	if (k_put || c_put) {
		/* Completion does its own kref_put.  If we are going to
		 * kref_sub below, we need req to be still around then. */
		int at_least = k_put + !!c_put;
		int refcount = atomic_read(&req->kref.refcount);
		if (refcount < at_least)
			drbd_err(device,
				"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
				s, req->rq_state, refcount, at_least);
	}

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	if (c_put)
		k_put += drbd_req_put_completion_ref(req, m, c_put);
	if (k_put)
		kref_sub(&req->kref, k_put, drbd_req_destroy);
}

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
		  (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
		  (unsigned long long)req->i.sector,
		  req->i.size >> 9,
		  bdevname(device->ldev->backing_bdev, b));
}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_NET_PENDING anymore, a neg-ack
 * was faster (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		   (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}

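/* A rough map of the rq_state bits that __req_mod()/mod_rq_state() juggle
 * (drbd_req.h has the authoritative definitions):
 *  - local leg:   RQ_LOCAL_PENDING -> RQ_LOCAL_COMPLETED, plus RQ_LOCAL_OK
 *                 on success or RQ_LOCAL_ABORTED if the disk request was
 *                 aborted;
 *  - network leg: RQ_NET_PENDING/RQ_NET_QUEUED -> RQ_NET_SENT -> RQ_NET_DONE,
 *                 plus RQ_NET_OK once (or, for protocol A, as if) acked;
 *  - modifiers:   RQ_WRITE, RQ_POSTPONED, RQ_COMPLETION_SUSP, RQ_IN_ACT_LOG,
 *                 and the RQ_EXP_{WRITE,RECEIVE,BARR}_ACK expectations.
 */
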
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 578 | /* obviously this could be coded as many single functions |
| 579 | * instead of one huge switch, |
| 580 | * or by putting the code directly in the respective locations |
| 581 | * (as it has been before). |
| 582 | * |
| 583 | * but having it this way |
| 584 | * enforces that it is all in this one place, where it is easier to audit, |
| 585 | * it makes it obvious that whatever "event" "happens" to a request should |
| 586 | * happen "atomically" within the req_lock, |
| 587 | * and it enforces that we have to think in a very structured manner |
| 588 | * about the "events" that may happen to a request during its life time ... |
| 589 | */ |
Philipp Reisner | 2a80699 | 2010-06-09 14:07:43 +0200 | [diff] [blame] | 590 | int __req_mod(struct drbd_request *req, enum drbd_req_event what, |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 591 | struct bio_and_error *m) |
| 592 | { |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 593 | struct drbd_device *const device = req->device; |
| 594 | struct drbd_peer_device *const peer_device = first_peer_device(device); |
| 595 | struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 596 | struct net_conf *nc; |
Philipp Reisner | 303d144 | 2011-04-13 16:24:47 -0700 | [diff] [blame] | 597 | int p, rv = 0; |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 598 | |
| 599 | if (m) |
| 600 | m->bio = NULL; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 601 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 602 | switch (what) { |
| 603 | default: |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 604 | drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 605 | break; |
| 606 | |
| 607 | /* does not happen... |
| 608 | * initialization done in drbd_req_new |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 609 | case CREATED: |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 610 | break; |
| 611 | */ |
| 612 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 613 | case TO_BE_SENT: /* via network */ |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 614 | /* reached via __drbd_make_request |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 615 | * and from w_read_retry_remote */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 616 | D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 617 | rcu_read_lock(); |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 618 | nc = rcu_dereference(connection->net_conf); |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 619 | p = nc->wire_protocol; |
| 620 | rcu_read_unlock(); |
Philipp Reisner | 303d144 | 2011-04-13 16:24:47 -0700 | [diff] [blame] | 621 | req->rq_state |= |
| 622 | p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK : |
| 623 | p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0; |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 624 | mod_rq_state(req, m, 0, RQ_NET_PENDING); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 625 | break; |
| 626 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 627 | case TO_BE_SUBMITTED: /* locally */ |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 628 | /* reached via __drbd_make_request */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 629 | D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK)); |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 630 | mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 631 | break; |
| 632 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 633 | case COMPLETED_OK: |
Philipp Reisner | 2b4dd36 | 2011-03-14 13:01:50 +0100 | [diff] [blame] | 634 | if (req->rq_state & RQ_WRITE) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 635 | device->writ_cnt += req->i.size >> 9; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 636 | else |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 637 | device->read_cnt += req->i.size >> 9; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 638 | |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 639 | mod_rq_state(req, m, RQ_LOCAL_PENDING, |
| 640 | RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 641 | break; |
| 642 | |
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 643 | case ABORT_DISK_IO: |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 644 | mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); |
Philipp Reisner | 2b4dd36 | 2011-03-14 13:01:50 +0100 | [diff] [blame] | 645 | break; |
| 646 | |
Lars Ellenberg | edc9f5e | 2012-09-27 15:18:21 +0200 | [diff] [blame] | 647 | case WRITE_COMPLETED_WITH_ERROR: |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 648 | drbd_report_io_error(device, req); |
| 649 | __drbd_chk_io_error(device, DRBD_WRITE_ERROR); |
Lars Ellenberg | edc9f5e | 2012-09-27 15:18:21 +0200 | [diff] [blame] | 650 | mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 651 | break; |
| 652 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 653 | case READ_COMPLETED_WITH_ERROR: |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 654 | drbd_set_out_of_sync(device, req->i.sector, req->i.size); |
| 655 | drbd_report_io_error(device, req); |
| 656 | __drbd_chk_io_error(device, DRBD_READ_ERROR); |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 657 | /* fall through. */ |
| 658 | case READ_AHEAD_COMPLETED_WITH_ERROR: |
| 659 | /* it is legal to fail READA, no __drbd_chk_io_error in that case. */ |
| 660 | mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); |
Lars Ellenberg | 4439c40 | 2012-03-26 17:29:30 +0200 | [diff] [blame] | 661 | break; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 662 | |
Lars Ellenberg | 2f632ae | 2014-04-28 18:43:24 +0200 | [diff] [blame] | 663 | case DISCARD_COMPLETED_NOTSUPP: |
| 664 | case DISCARD_COMPLETED_WITH_ERROR: |
| 665 | /* I'd rather not detach from local disk just because it |
| 666 | * failed a REQ_DISCARD. */ |
| 667 | mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); |
| 668 | break; |
| 669 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 670 | case QUEUE_FOR_NET_READ: |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 671 | /* READ or READA, and |
| 672 | * no local disk, |
| 673 | * or target area marked as invalid, |
| 674 | * or just got an io-error. */ |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 675 | /* from __drbd_make_request |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 676 | * or from bio_endio during read io-error recovery */ |
| 677 | |
Lars Ellenberg | 6870ca6 | 2012-03-26 17:02:45 +0200 | [diff] [blame] | 678 | /* So we can verify the handle in the answer packet. |
| 679 | * Corresponding drbd_remove_request_interval is in |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 680 | * drbd_req_complete() */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 681 | D_ASSERT(device, drbd_interval_empty(&req->i)); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 682 | drbd_insert_interval(&device->read_requests, &req->i); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 683 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 684 | set_bit(UNPLUG_REMOTE, &device->flags); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 685 | |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 686 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
| 687 | D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 688 | mod_rq_state(req, m, 0, RQ_NET_QUEUED); |
Lars Ellenberg | 4439c40 | 2012-03-26 17:29:30 +0200 | [diff] [blame] | 689 | req->w.cb = w_send_read_req; |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 690 | drbd_queue_work(&connection->sender_work, |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 691 | &req->w); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 692 | break; |
| 693 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 694 | case QUEUE_FOR_NET_WRITE: |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 695 | /* assert something? */ |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 696 | /* from __drbd_make_request only */ |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 697 | |
Lars Ellenberg | 6870ca6 | 2012-03-26 17:02:45 +0200 | [diff] [blame] | 698 | /* Corresponding drbd_remove_request_interval is in |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 699 | * drbd_req_complete() */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 700 | D_ASSERT(device, drbd_interval_empty(&req->i)); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 701 | drbd_insert_interval(&device->write_requests, &req->i); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 702 | |
| 703 | /* NOTE |
| 704 | * In case the req ended up on the transfer log before being |
| 705 | * queued on the worker, it could lead to this request being |
| 706 | * missed during cleanup after connection loss. |
| 707 | * So we have to do both operations here, |
| 708 | * within the same lock that protects the transfer log. |
| 709 | * |
| 710 | * _req_add_to_epoch(req); this has to be after the |
| 711 | * _maybe_start_new_epoch(req); which happened in |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 712 | * __drbd_make_request, because we now may set the bit |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 713 | * again ourselves to close the current epoch. |
| 714 | * |
| 715 | * Add req to the (now) current epoch (barrier). */ |
| 716 | |
Lars Ellenberg | 83c3883 | 2009-11-03 02:22:06 +0100 | [diff] [blame] | 717 | /* otherwise we may lose an unplug, which may cause some remote |
| 718 | * io-scheduler timeout to expire, increasing maximum latency, |
| 719 | * hurting performance. */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 720 | set_bit(UNPLUG_REMOTE, &device->flags); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 721 | |
| 722 | /* queue work item to send data */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 723 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 724 | mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 725 | req->w.cb = w_send_dblock; |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 726 | drbd_queue_work(&connection->sender_work, |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 727 | &req->w); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 728 | |
| 729 | /* close the epoch, in case it outgrew the limit */ |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 730 | rcu_read_lock(); |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 731 | nc = rcu_dereference(connection->net_conf); |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 732 | p = nc->max_epoch_size; |
| 733 | rcu_read_unlock(); |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 734 | if (connection->current_tle_writes >= p) |
| 735 | start_new_tl_epoch(connection); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 736 | |
| 737 | break; |
| 738 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 739 | case QUEUE_FOR_SEND_OOS: |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 740 | mod_rq_state(req, m, 0, RQ_NET_QUEUED); |
Andreas Gruenbacher | 8f7bed7 | 2010-12-19 23:53:14 +0100 | [diff] [blame] | 741 | req->w.cb = w_send_out_of_sync; |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 742 | drbd_queue_work(&connection->sender_work, |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 743 | &req->w); |
Philipp Reisner | 73a01a1 | 2010-10-27 14:33:00 +0200 | [diff] [blame] | 744 | break; |
| 745 | |
Lars Ellenberg | ea9d672 | 2012-03-26 16:46:39 +0200 | [diff] [blame] | 746 | case READ_RETRY_REMOTE_CANCELED: |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 747 | case SEND_CANCELED: |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 748 | case SEND_FAILED: |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 749 | /* real cleanup will be done from tl_clear. just update flags |
| 750 | * so it is no longer marked as on the worker queue */ |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 751 | mod_rq_state(req, m, RQ_NET_QUEUED, 0); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 752 | break; |
| 753 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 754 | case HANDED_OVER_TO_NETWORK: |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 755 | /* assert something? */ |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 756 | if (is_pending_write_protocol_A(req)) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 757 | /* this is what is dangerous about protocol A: |
| 758 | * pretend it was successfully written on the peer. */ |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 759 | mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING, |
| 760 | RQ_NET_SENT|RQ_NET_OK); |
| 761 | else |
| 762 | mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); |
| 763 | /* It is still not yet RQ_NET_DONE until the |
| 764 | * corresponding epoch barrier got acked as well, |
| 765 | * so we know what to dirty on connection loss. */ |
Lars Ellenberg | 6d49e10 | 2012-01-11 09:43:25 +0100 | [diff] [blame] | 766 | break; |
| 767 | |
Lars Ellenberg | 27a434f | 2012-03-26 16:44:59 +0200 | [diff] [blame] | 768 | case OOS_HANDED_TO_NETWORK: |
Lars Ellenberg | 6d49e10 | 2012-01-11 09:43:25 +0100 | [diff] [blame] | 769 | /* Was not set PENDING, no longer QUEUED, so is now DONE |
| 770 | * as far as this connection is concerned. */ |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 771 | mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 772 | break; |
| 773 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 774 | case CONNECTION_LOST_WHILE_PENDING: |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 775 | /* transfer log cleanup after connection loss */ |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 776 | mod_rq_state(req, m, |
| 777 | RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP, |
| 778 | RQ_NET_DONE); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 779 | break; |
| 780 | |
Lars Ellenberg | d4dabbe | 2012-08-01 12:33:51 +0200 | [diff] [blame] | 781 | case CONFLICT_RESOLVED: |
| 782 | /* for superseded conflicting writes of multiple primaries, |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 783 | * there is no need to keep anything in the tl, potential |
Lars Ellenberg | 934722a | 2012-07-24 09:31:18 +0200 | [diff] [blame] | 784 | * node crashes are covered by the activity log. |
| 785 | * |
| 786 | * If this request had been marked as RQ_POSTPONED before, |
Lars Ellenberg | d4dabbe | 2012-08-01 12:33:51 +0200 | [diff] [blame] | 787 | * it will actually not be completed, but "restarted", |
Lars Ellenberg | 934722a | 2012-07-24 09:31:18 +0200 | [diff] [blame] | 788 | * resubmitted from the retry worker context. */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 789 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
| 790 | D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); |
Lars Ellenberg | 934722a | 2012-07-24 09:31:18 +0200 | [diff] [blame] | 791 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); |
| 792 | break; |
| 793 | |
Lars Ellenberg | 0afd569 | 2012-03-26 16:51:11 +0200 | [diff] [blame] | 794 | case WRITE_ACKED_BY_PEER_AND_SIS: |
Lars Ellenberg | 934722a | 2012-07-24 09:31:18 +0200 | [diff] [blame] | 795 | req->rq_state |= RQ_NET_SIS; |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 796 | case WRITE_ACKED_BY_PEER: |
Lars Ellenberg | 08d0dab | 2014-03-20 11:19:22 +0100 | [diff] [blame] | 797 | /* Normal operation protocol C: successfully written on peer. |
| 798 | * During resync, even in protocol != C, |
| 799 | * we requested an explicit write ack anyways. |
| 800 | * Which means we cannot even assert anything here. |
Lars Ellenberg | d64957c | 2012-03-23 14:42:19 +0100 | [diff] [blame] | 801 | * Nothing more to do here. |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 802 | * We want to keep the tl in place for all protocols, to cater |
Lars Ellenberg | d64957c | 2012-03-23 14:42:19 +0100 | [diff] [blame] | 803 | * for volatile write-back caches on lower level devices. */ |
Philipp Reisner | 303d144 | 2011-04-13 16:24:47 -0700 | [diff] [blame] | 804 | goto ack_common; |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 805 | case RECV_ACKED_BY_PEER: |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 806 | D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 807 | /* protocol B; pretends to be successfully written on peer. |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 808 | * see also notes above in HANDED_OVER_TO_NETWORK about |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 809 | * protocol != C */ |
Philipp Reisner | 303d144 | 2011-04-13 16:24:47 -0700 | [diff] [blame] | 810 | ack_common: |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 811 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 812 | break; |
| 813 | |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 814 | case POSTPONE_WRITE: |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 815 | D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); |
Philipp Reisner | 303d144 | 2011-04-13 16:24:47 -0700 | [diff] [blame] | 816 | /* If this node has already detected the write conflict, the |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 817 | * worker will be waiting on misc_wait. Wake it up once this |
| 818 | * request has completed locally. |
| 819 | */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 820 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
Andreas Gruenbacher | 7be8da0 | 2011-02-22 02:15:32 +0100 | [diff] [blame] | 821 | req->rq_state |= RQ_POSTPONED; |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 822 | if (req->i.waiting) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 823 | wake_up(&device->misc_wait); |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 824 | /* Do not clear RQ_NET_PENDING. This request will make further |
| 825 | * progress via restart_conflicting_writes() or |
| 826 | * fail_postponed_requests(). Hopefully. */ |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 827 | break; |
| 828 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 829 | case NEG_ACKED: |
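| | 		/* the peer negatively acknowledged this request: it is neither pending nor OK on the network side anymore */ |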
Lars Ellenberg | 46e21bb | 2012-08-07 06:47:14 +0200 | [diff] [blame] | 830 | mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 831 | break; |
| 832 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 833 | case FAIL_FROZEN_DISK_IO: |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 834 | if (!(req->rq_state & RQ_LOCAL_COMPLETED)) |
| 835 | break; |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 836 | mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 837 | break; |
| 838 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 839 | case RESTART_FROZEN_DISK_IO: |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 840 | if (!(req->rq_state & RQ_LOCAL_COMPLETED)) |
| 841 | break; |
| 842 | |
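| | 		/* local IO had been frozen; put the request back to "local pending" and let the worker resubmit it to the local disk (w_restart_disk_io) */ |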
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 843 | mod_rq_state(req, m, |
| 844 | RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED, |
| 845 | RQ_LOCAL_PENDING); |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 846 | |
| 847 | rv = MR_READ; |
| 848 | if (bio_data_dir(req->master_bio) == WRITE) |
| 849 | rv = MR_WRITE; |
| 850 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 851 | get_ldev(device); /* always succeeds in this call path */ |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 852 | req->w.cb = w_restart_disk_io; |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 853 | drbd_queue_work(&connection->sender_work, |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 854 | &req->w); |
Philipp Reisner | 265be2d | 2010-05-31 10:14:17 +0200 | [diff] [blame] | 855 | break; |
| 856 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 857 | case RESEND: |
Philipp Reisner | 509fc01 | 2012-07-31 11:22:58 +0200 | [diff] [blame] | 858 | /* Simply complete (local only) READs. */ |
| 859 | if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { |
Philipp Reisner | 8a0bab2 | 2012-08-07 13:28:00 +0200 | [diff] [blame] | 860 | mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); |
Philipp Reisner | 509fc01 | 2012-07-31 11:22:58 +0200 | [diff] [blame] | 861 | break; |
| 862 | } |
| 863 | |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 864 | /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 865 | before the connection loss (B&C only); only P_BARRIER_ACK |
| 866 | (or the local completion?) was missing when we suspended. |
Lars Ellenberg | 6870ca6 | 2012-03-26 17:02:45 +0200 | [diff] [blame] | 867 | Throwing them out of the TL here by pretending we got a BARRIER_ACK. |
| 868 | During connection handshake, we ensure that the peer was not rebooted. */ |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 869 | if (!(req->rq_state & RQ_NET_OK)) { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 870 | /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync? |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 871 | * in that case we must not set RQ_NET_PENDING. */ |
| 872 | |
| 873 | mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 874 | if (req->w.cb) { |
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 875 | /* w.cb expected to be w_send_dblock, or w_send_read_req */ |
| 876 | drbd_queue_work(&connection->sender_work, |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 877 | &req->w); |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 878 | rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 879 | } /* else: FIXME can this happen? */ |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 880 | break; |
| 881 | } |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 882 | /* else, fall through to BARRIER_ACKED */ |
Philipp Reisner | 11b58e7 | 2010-05-12 17:08:26 +0200 | [diff] [blame] | 883 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 884 | case BARRIER_ACKED: |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 885 | /* barrier ack for READ requests does not make sense */ |
Philipp Reisner | 288f422 | 2010-05-27 15:07:43 +0200 | [diff] [blame] | 886 | if (!(req->rq_state & RQ_WRITE)) |
| 887 | break; |
| 888 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 889 | if (req->rq_state & RQ_NET_PENDING) { |
Andreas Gruenbacher | a209b4a | 2011-08-17 12:43:25 +0200 | [diff] [blame] | 890 | /* barrier came in before all requests were acked. |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 891 | * this is bad, because if the connection is lost now, |
| 892 | * we won't be able to clean them up... */ |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 893 | drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n"); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 894 | } |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 895 | /* Allowed to complete requests, even while suspended. |
| 896 | * As this is called for all requests within a matching epoch, |
| 897 | * we need to filter, and only set RQ_NET_DONE for those that |
| 898 | * have actually been on the wire. */ |
| 899 | mod_rq_state(req, m, RQ_COMPLETION_SUSP, |
| 900 | (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 901 | break; |
| 902 | |
Andreas Gruenbacher | 8554df1 | 2011-01-25 15:37:43 +0100 | [diff] [blame] | 903 | case DATA_RECEIVED: |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 904 | D_ASSERT(device, req->rq_state & RQ_NET_PENDING); |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 905 | mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 906 | break; |
Lars Ellenberg | 7074e4a | 2013-03-27 14:08:41 +0100 | [diff] [blame] | 907 | |
| 908 | case QUEUE_AS_DRBD_BARRIER: |
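| | 		/* an empty flush is mapped to a DRBD barrier: start a new transfer log epoch and mark this request done on the network side */ |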
Lars Ellenberg | 44a4d55 | 2013-11-22 12:40:58 +0100 | [diff] [blame] | 909 | start_new_tl_epoch(connection); |
Lars Ellenberg | 7074e4a | 2013-03-27 14:08:41 +0100 | [diff] [blame] | 910 | mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); |
| 911 | break; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 912 | } |
Philipp Reisner | 2a80699 | 2010-06-09 14:07:43 +0200 | [diff] [blame] | 913 | |
| 914 | return rv; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 915 | } |
| 916 | |
| 917 | /* we may do a local read if: |
| 918 | * - we are consistent (of course), |
| 919 | * - or we are generally inconsistent, |
| 920 | * BUT we are still/already IN SYNC for this area. |
| 921 | * since size may be bigger than BM_BLOCK_SIZE, |
| 922 | * we may need to check several bits. |
| 923 | */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 924 | static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 925 | { |
| 926 | unsigned long sbnr, ebnr; |
| 927 | sector_t esector, nr_sectors; |
| 928 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 929 | if (device->state.disk == D_UP_TO_DATE) |
Andreas Gruenbacher | 0da34df | 2010-12-19 20:48:29 +0100 | [diff] [blame] | 930 | return true; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 931 | if (device->state.disk != D_INCONSISTENT) |
Andreas Gruenbacher | 0da34df | 2010-12-19 20:48:29 +0100 | [diff] [blame] | 932 | return false; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 933 | esector = sector + (size >> 9) - 1; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 934 | nr_sectors = drbd_get_capacity(device->this_bdev); |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 935 | D_ASSERT(device, sector < nr_sectors); |
| 936 | D_ASSERT(device, esector < nr_sectors); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 937 | |
| 938 | sbnr = BM_SECT_TO_BIT(sector); |
| 939 | ebnr = BM_SECT_TO_BIT(esector); |
| 940 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 941 | return drbd_bm_count_bits(device, sbnr, ebnr) == 0; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 942 | } |
| 943 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 944 | static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector, |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 945 | enum drbd_read_balancing rbm) |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 946 | { |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 947 | struct backing_dev_info *bdi; |
Philipp Reisner | d60de03 | 2011-11-17 10:12:31 +0100 | [diff] [blame] | 948 | int stripe_shift; |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 949 | |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 950 | switch (rbm) { |
| 951 | case RB_CONGESTED_REMOTE: |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 952 | bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 953 | return bdi_read_congested(bdi); |
| 954 | case RB_LEAST_PENDING: |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 955 | return atomic_read(&device->local_cnt) > |
| 956 | atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt); |
Philipp Reisner | d60de03 | 2011-11-17 10:12:31 +0100 | [diff] [blame] | 957 | case RB_32K_STRIPING: /* stripe_shift = 15 */ |
| 958 | case RB_64K_STRIPING: |
| 959 | case RB_128K_STRIPING: |
| 960 | case RB_256K_STRIPING: |
| 961 | case RB_512K_STRIPING: |
| 962 | case RB_1M_STRIPING: /* stripe_shift = 20 */ |
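| | 		/* stripe_shift is 15 (32K) .. 20 (1M); sector >> (stripe_shift - 9) is the stripe number (sector is in 512-byte units), and its lowest bit alternates between local and remote */ |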
| 963 | stripe_shift = (rbm - RB_32K_STRIPING + 15); |
| 964 | return (sector >> (stripe_shift - 9)) & 1; |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 965 | case RB_ROUND_ROBIN: |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 966 | return test_and_change_bit(READ_BALANCE_RR, &device->flags); |
Philipp Reisner | 380207d | 2011-11-11 12:31:20 +0100 | [diff] [blame] | 967 | case RB_PREFER_REMOTE: |
| 968 | return true; |
| 969 | case RB_PREFER_LOCAL: |
| 970 | default: |
| 971 | return false; |
| 972 | } |
| 973 | } |
| 974 | |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 975 | /* |
| 976 | * complete_conflicting_writes - wait for any conflicting write requests |
| 977 | * |
| 978 | * The write_requests tree contains all active write requests which we |
| 979 | * currently know about. Wait for any requests to complete which conflict with |
| 980 | * the new one. |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 981 | * |
| 982 | * Only way out: remove the conflicting intervals from the tree. |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 983 | */ |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 984 | static void complete_conflicting_writes(struct drbd_request *req) |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 985 | { |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 986 | DEFINE_WAIT(wait); |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 987 | struct drbd_device *device = req->device; |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 988 | struct drbd_interval *i; |
| 989 | sector_t sector = req->i.sector; |
| 990 | int size = req->i.size; |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 991 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 992 | i = drbd_find_overlap(&device->write_requests, sector, size); |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 993 | if (!i) |
| 994 | return; |
| 995 | |
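| | 	/* at least one conflicting write is active: sleep on misc_wait (dropping req_lock around schedule()) until no overlapping interval is left */ |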
| 996 | for (;;) { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 997 | prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE); |
| 998 | i = drbd_find_overlap(&device->write_requests, sector, size); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 999 | if (!i) |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 1000 | break; |
| 1001 | /* Indicate to wake up device->misc_wait on progress. */ |
| 1002 | i->waiting = true; |
Andreas Gruenbacher | 0500813 | 2011-07-07 14:19:42 +0200 | [diff] [blame] | 1003 | spin_unlock_irq(&device->resource->req_lock); |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 1004 | schedule(); |
Andreas Gruenbacher | 0500813 | 2011-07-07 14:19:42 +0200 | [diff] [blame] | 1005 | spin_lock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 1006 | } |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1007 | finish_wait(&device->misc_wait, &wait); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 1008 | } |
| 1009 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1010 | /* called within req_lock and rcu_read_lock() */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1011 | static void maybe_pull_ahead(struct drbd_device *device) |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1012 | { |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1013 | struct drbd_connection *connection = first_peer_device(device)->connection; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1014 | struct net_conf *nc; |
| 1015 | bool congested = false; |
| 1016 | enum drbd_on_congestion on_congestion; |
| 1017 | |
Lars Ellenberg | 607f25e | 2013-03-27 14:08:45 +0100 | [diff] [blame] | 1018 | rcu_read_lock(); |
Andreas Gruenbacher | bde89a9 | 2011-05-30 16:32:41 +0200 | [diff] [blame] | 1019 | nc = rcu_dereference(connection->net_conf); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1020 | on_congestion = nc ? nc->on_congestion : OC_BLOCK; |
Lars Ellenberg | 607f25e | 2013-03-27 14:08:45 +0100 | [diff] [blame] | 1021 | rcu_read_unlock(); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1022 | if (on_congestion == OC_BLOCK || |
Andreas Gruenbacher | bde89a9 | 2011-05-30 16:32:41 +0200 | [diff] [blame] | 1023 | connection->agreed_pro_version < 96) |
Lars Ellenberg | 3b9ef85 | 2012-07-30 09:06:26 +0200 | [diff] [blame] | 1024 | return; |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1025 | |
Lars Ellenberg | 0c066bc | 2014-03-20 14:04:35 +0100 | [diff] [blame] | 1026 | if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD) |
| 1027 | return; /* nothing to do ... */ |
| 1028 | |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1029 | /* If I don't even have good local storage, we cannot reasonably try |
| 1030 | * to pull ahead of the peer. We also need the local reference to make |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1031 | * sure device->act_log is there. |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1032 | */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1033 | if (!get_ldev_if_state(device, D_UP_TO_DATE)) |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1034 | return; |
| 1035 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1036 | if (nc->cong_fill && |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1037 | atomic_read(&device->ap_in_flight) >= nc->cong_fill) { |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1038 | drbd_info(device, "Congestion-fill threshold reached\n"); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1039 | congested = true; |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1040 | } |
| 1041 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1042 | if (device->act_log->used >= nc->cong_extents) { |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1043 | drbd_info(device, "Congestion-extents threshold reached\n"); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1044 | congested = true; |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1045 | } |
| 1046 | |
| 1047 | if (congested) { |
Lars Ellenberg | 99b4d8f | 2012-08-07 06:42:09 +0200 | [diff] [blame] | 1048 | /* start a new epoch for non-mirrored writes */ |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1049 | start_new_tl_epoch(first_peer_device(device)->connection); |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1050 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1051 | if (on_congestion == OC_PULL_AHEAD) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1052 | _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1053 | else /*nc->on_congestion == OC_DISCONNECT */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1054 | _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL); |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1055 | } |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1056 | put_ldev(device); |
Lars Ellenberg | 0d5934e | 2012-06-08 14:17:36 +0200 | [diff] [blame] | 1057 | } |
| 1058 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1059 | /* If this returns false, and req->private_bio is still set, |
| 1060 | * this should be submitted locally. |
| 1061 | * |
| 1062 | * If it returns false, but req->private_bio is not set, |
| 1063 | * we do not have access to good data :( |
| 1064 | * |
| 1065 | * Otherwise, this destroys req->private_bio, if any, |
| 1066 | * and returns true. |
| 1067 | */ |
| 1068 | static bool do_remote_read(struct drbd_request *req) |
| 1069 | { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 1070 | struct drbd_device *device = req->device; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1071 | enum drbd_read_balancing rbm; |
| 1072 | |
| 1073 | if (req->private_bio) { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1074 | if (!drbd_may_do_local_read(device, |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1075 | req->i.sector, req->i.size)) { |
| 1076 | bio_put(req->private_bio); |
| 1077 | req->private_bio = NULL; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1078 | put_ldev(device); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1079 | } |
| 1080 | } |
| 1081 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1082 | if (device->state.pdsk != D_UP_TO_DATE) |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1083 | return false; |
| 1084 | |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 1085 | if (req->private_bio == NULL) |
| 1086 | return true; |
| 1087 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1088 | /* TODO: improve read balancing decisions, take into account drbd |
| 1089 | * protocol, pending requests etc. */ |
| 1090 | |
| 1091 | rcu_read_lock(); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1092 | rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1093 | rcu_read_unlock(); |
| 1094 | |
| 1095 | if (rbm == RB_PREFER_LOCAL && req->private_bio) |
| 1096 | return false; /* submit locally */ |
| 1097 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1098 | if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1099 | if (req->private_bio) { |
| 1100 | bio_put(req->private_bio); |
| 1101 | req->private_bio = NULL; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1102 | put_ldev(device); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1103 | } |
| 1104 | return true; |
| 1105 | } |
| 1106 | |
| 1107 | return false; |
| 1108 | } |
| 1109 | |
| 1110 | /* returns number of connections (== 1, for drbd 8.4) |
| 1111 | * expected to actually write this data, |
| 1112 | * which does NOT include those that we are L_AHEAD for. */ |
| 1113 | static int drbd_process_write_request(struct drbd_request *req) |
| 1114 | { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 1115 | struct drbd_device *device = req->device; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1116 | int remote, send_oos; |
| 1117 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1118 | remote = drbd_should_do_remote(device->state); |
| 1119 | send_oos = drbd_should_send_out_of_sync(device->state); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1120 | |
Lars Ellenberg | 519b6d3 | 2012-08-03 02:19:09 +0200 | [diff] [blame] | 1121 | /* Need to replicate writes. Unless it is an empty flush, |
| 1122 | * which is better mapped to a DRBD P_BARRIER packet, |
| 1123 | * also for drbd wire protocol compatibility reasons. |
| 1124 | * If this was a flush, just start a new epoch. |
| 1125 | * Unless the current epoch was empty anyways, or we are not currently |
| 1126 | * replicating, in which case there is no point. */ |
| 1127 | if (unlikely(req->i.size == 0)) { |
| 1128 | /* The only size==0 bios we expect are empty flushes. */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 1129 | D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); |
Lars Ellenberg | 99b4d8f | 2012-08-07 06:42:09 +0200 | [diff] [blame] | 1130 | if (remote) |
Lars Ellenberg | 7074e4a | 2013-03-27 14:08:41 +0100 | [diff] [blame] | 1131 | _req_mod(req, QUEUE_AS_DRBD_BARRIER); |
| 1132 | return remote; |
Lars Ellenberg | 519b6d3 | 2012-08-03 02:19:09 +0200 | [diff] [blame] | 1133 | } |
| 1134 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1135 | if (!remote && !send_oos) |
| 1136 | return 0; |
| 1137 | |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 1138 | D_ASSERT(device, !(remote && send_oos)); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1139 | |
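| | 	/* either replicate the write to the peer, or mark the range out of sync in the bitmap and queue an out-of-sync notification for the peer */ |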
| 1140 | if (remote) { |
| 1141 | _req_mod(req, TO_BE_SENT); |
| 1142 | _req_mod(req, QUEUE_FOR_NET_WRITE); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1143 | } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size)) |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1144 | _req_mod(req, QUEUE_FOR_SEND_OOS); |
| 1145 | |
| 1146 | return remote; |
| 1147 | } |
| 1148 | |
| 1149 | static void |
| 1150 | drbd_submit_req_private_bio(struct drbd_request *req) |
| 1151 | { |
Andreas Gruenbacher | 84b8c06 | 2011-07-28 15:27:51 +0200 | [diff] [blame] | 1152 | struct drbd_device *device = req->device; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1153 | struct bio *bio = req->private_bio; |
| 1154 | const int rw = bio_rw(bio); |
| 1155 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1156 | bio->bi_bdev = device->ldev->backing_bdev; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1157 | |
| 1158 | /* State may have changed since we grabbed our reference on the |
| 1159 | * ->ldev member. Double check, and short-circuit to endio. |
| 1160 | * In case the last activity log transaction failed to get on |
| 1161 | * stable storage, and this is a WRITE, we may not even submit |
| 1162 | * this bio. */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1163 | if (get_ldev(device)) { |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1164 | req->pre_submit_jif = jiffies; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1165 | if (drbd_insert_fault(device, |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1166 | rw == WRITE ? DRBD_FAULT_DT_WR |
| 1167 | : rw == READ ? DRBD_FAULT_DT_RD |
| 1168 | : DRBD_FAULT_DT_RA)) |
| 1169 | bio_endio(bio, -EIO); |
| 1170 | else |
| 1171 | generic_make_request(bio); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1172 | put_ldev(device); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1173 | } else |
| 1174 | bio_endio(bio, -EIO); |
| 1175 | } |
| 1176 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1177 | static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) |
Lars Ellenberg | 779b3fe | 2013-03-19 18:16:54 +0100 | [diff] [blame] | 1178 | { |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1179 | spin_lock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1180 | list_add_tail(&req->tl_requests, &device->submit.writes); |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1181 | list_add_tail(&req->req_pending_master_completion, |
| 1182 | &device->pending_master_completion[1 /* WRITE */]); |
| 1183 | spin_unlock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1184 | queue_work(device->submit.wq, &device->submit.worker); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1185 | /* do_submit() may sleep internally on al_wait, too */ |
| 1186 | wake_up(&device->al_wait); |
Lars Ellenberg | 779b3fe | 2013-03-19 18:16:54 +0100 | [diff] [blame] | 1187 | } |
| 1188 | |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1189 | /* returns the new drbd_request pointer, if the caller is expected to |
| 1190 | * drbd_send_and_submit() it (to save latency), or NULL if we queued the |
| 1191 | * request on the submitter thread. |
| 1192 | * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request. |
| 1193 | */ |
Rashika Kheria | 01cd263 | 2013-12-19 15:12:27 +0530 | [diff] [blame] | 1194 | static struct drbd_request * |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1195 | drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1196 | { |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1197 | const int rw = bio_data_dir(bio); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1198 | struct drbd_request *req; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1199 | |
| 1200 | /* allocate outside of all locks; */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1201 | req = drbd_req_new(device, bio); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1202 | if (!req) { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1203 | dec_ap_bio(device); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1204 | /* only pass the error to the upper layers. |
| 1205 | * if user cannot handle io errors, that's not our business. */ |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1206 | drbd_err(device, "could not kmalloc() req\n"); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1207 | bio_endio(bio, -ENOMEM); |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1208 | return ERR_PTR(-ENOMEM); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1209 | } |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1210 | req->start_jif = start_jif; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1211 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1212 | if (!get_ldev(device)) { |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1213 | bio_put(req->private_bio); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1214 | req->private_bio = NULL; |
| 1215 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1216 | |
Lars Ellenberg | 7e8c288 | 2013-03-19 18:16:57 +0100 | [diff] [blame] | 1217 | /* Update disk stats */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1218 | _drbd_start_io_acct(device, req); |
Lars Ellenberg | 7e8c288 | 2013-03-19 18:16:57 +0100 | [diff] [blame] | 1219 | |
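| | 	/* a write needs a hot activity log extent: try the non-blocking fastpath here; if that fails, hand the request to the submitter work queue (do_submit) and return NULL */ |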
Lars Ellenberg | 519b6d3 | 2012-08-03 02:19:09 +0200 | [diff] [blame] | 1220 | if (rw == WRITE && req->private_bio && req->i.size |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1221 | && !test_bit(AL_SUSPENDED, &device->flags)) { |
| 1222 | if (!drbd_al_begin_io_fastpath(device, &req->i)) { |
Lars Ellenberg | ad3fee7 | 2013-12-20 11:22:13 +0100 | [diff] [blame] | 1223 | atomic_inc(&device->ap_actlog_cnt); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1224 | drbd_queue_write(device, req); |
Lars Ellenberg | 779b3fe | 2013-03-19 18:16:54 +0100 | [diff] [blame] | 1225 | return NULL; |
| 1226 | } |
Philipp Reisner | 0778286 | 2010-08-31 12:00:50 +0200 | [diff] [blame] | 1227 | req->rq_state |= RQ_IN_ACT_LOG; |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1228 | req->in_actlog_jif = jiffies; |
Philipp Reisner | 0778286 | 2010-08-31 12:00:50 +0200 | [diff] [blame] | 1229 | } |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1230 | |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1231 | return req; |
| 1232 | } |
| 1233 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1234 | static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1235 | { |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1236 | struct drbd_resource *resource = device->resource; |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1237 | const int rw = bio_rw(req->master_bio); |
| 1238 | struct bio_and_error m = { NULL, }; |
| 1239 | bool no_remote = false; |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1240 | bool submit_private_bio = false; |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1241 | |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1242 | spin_lock_irq(&resource->req_lock); |
Andreas Gruenbacher | 6024fec | 2011-01-28 15:53:51 +0100 | [diff] [blame] | 1243 | if (rw == WRITE) { |
Lars Ellenberg | 648e46b | 2012-03-26 20:12:24 +0200 | [diff] [blame] | 1244 | /* This may temporarily give up the req_lock, |
| 1245 | * but will re-acquire it before it returns here. |
| 1246 | * Needs to be before the check on drbd_suspended() */ |
| 1247 | complete_conflicting_writes(req); |
Lars Ellenberg | 607f25e | 2013-03-27 14:08:45 +0100 | [diff] [blame] | 1248 | /* no more giving up req_lock from now on! */ |
| 1249 | |
| 1250 | /* check for congestion, and potentially stop sending |
| 1251 | * full data updates, but start sending "dirty bits" only. */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1252 | maybe_pull_ahead(device); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1253 | } |
| 1254 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1255 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1256 | if (drbd_suspended(device)) { |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1257 | /* push back and retry: */ |
| 1258 | req->rq_state |= RQ_POSTPONED; |
| 1259 | if (req->private_bio) { |
| 1260 | bio_put(req->private_bio); |
| 1261 | req->private_bio = NULL; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1262 | put_ldev(device); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1263 | } |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1264 | goto out; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1265 | } |
| 1266 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1267 | /* We fail READ/READA early if we cannot serve it. |
| 1268 | * We must do this before req is registered on any lists. |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 1269 | * Otherwise, drbd_req_complete() will queue failed READ for retry. */ |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1270 | if (rw != WRITE) { |
| 1271 | if (!do_remote_read(req) && !req->private_bio) |
| 1272 | goto nodata; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1273 | } |
| 1274 | |
Lars Ellenberg | b6dd1a8 | 2011-11-28 15:04:49 +0100 | [diff] [blame] | 1275 | /* which transfer log epoch does this belong to? */ |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1276 | req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr); |
Philipp Reisner | 288f422 | 2010-05-27 15:07:43 +0200 | [diff] [blame] | 1277 | |
Lars Ellenberg | 227f052 | 2012-07-31 09:31:11 +0200 | [diff] [blame] | 1278 | /* no point in adding empty flushes to the transfer log, |
| 1279 | * they are mapped to drbd barriers already. */ |
Lars Ellenberg | 99b4d8f | 2012-08-07 06:42:09 +0200 | [diff] [blame] | 1280 | if (likely(req->i.size != 0)) { |
| 1281 | if (rw == WRITE) |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1282 | first_peer_device(device)->connection->current_tle_writes++; |
Philipp Reisner | 288f422 | 2010-05-27 15:07:43 +0200 | [diff] [blame] | 1283 | |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1284 | list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1285 | } |
Philipp Reisner | 6753171 | 2010-10-27 12:21:30 +0200 | [diff] [blame] | 1286 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1287 | if (rw == WRITE) { |
| 1288 | if (!drbd_process_write_request(req)) |
| 1289 | no_remote = true; |
| 1290 | } else { |
| 1291 | /* We either have a private_bio, or we can read from remote. |
| 1292 | * Otherwise we had done the goto nodata above. */ |
| 1293 | if (req->private_bio == NULL) { |
| 1294 | _req_mod(req, TO_BE_SENT); |
| 1295 | _req_mod(req, QUEUE_FOR_NET_READ); |
Lars Ellenberg | 6719fb0 | 2010-10-18 23:04:07 +0200 | [diff] [blame] | 1296 | } else |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1297 | no_remote = true; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1298 | } |
| 1299 | |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1300 | /* If it took the fast path in drbd_request_prepare, add it here. |
| 1301 | * The slow path has added it already. */ |
| 1302 | if (list_empty(&req->req_pending_master_completion)) |
| 1303 | list_add_tail(&req->req_pending_master_completion, |
| 1304 | &device->pending_master_completion[rw == WRITE]); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1305 | if (req->private_bio) { |
| 1306 | /* needs to be marked within the same spinlock */ |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1307 | list_add_tail(&req->req_pending_local, |
| 1308 | &device->pending_completion[rw == WRITE]); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1309 | _req_mod(req, TO_BE_SUBMITTED); |
| 1310 | /* but we need to give up the spinlock to submit */ |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1311 | submit_private_bio = true; |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1312 | } else if (no_remote) { |
| 1313 | nodata: |
| 1314 | if (__ratelimit(&drbd_ratelimit_state)) |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1315 | drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n", |
Lars Ellenberg | 42839f6 | 2012-09-27 15:19:38 +0200 | [diff] [blame] | 1316 | (unsigned long long)req->i.sector, req->i.size >> 9); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1317 | /* A write may have been queued for send_oos, however. |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 1318 | * So we can not simply free it, we must go through drbd_req_put_completion_ref() */ |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1319 | } |
| 1320 | |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1321 | out: |
Lars Ellenberg | a0d856d | 2012-01-24 17:19:42 +0100 | [diff] [blame] | 1322 | if (drbd_req_put_completion_ref(req, &m, 1)) |
| 1323 | kref_put(&req->kref, drbd_req_destroy); |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1324 | spin_unlock_irq(&resource->req_lock); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1325 | |
Lars Ellenberg | 35b5ed5 | 2013-12-04 12:07:09 +0100 | [diff] [blame] | 1326 | /* Even though above is a kref_put(), this is safe. |
| 1327 | * As long as we still need to submit our private bio, |
| 1328 | * we hold a completion ref, and the request cannot disappear. |
| 1329 | * If however this request did not even have a private bio to submit |
| 1330 | * (e.g. remote read), req may already be invalid now. |
| 1331 | * That's why we cannot check on req->private_bio. */ |
| 1332 | if (submit_private_bio) |
| 1333 | drbd_submit_req_private_bio(req); |
Lars Ellenberg | 5da9c83 | 2012-03-29 17:04:14 +0200 | [diff] [blame] | 1334 | if (m.bio) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1335 | complete_master_bio(device, &m); |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1336 | } |
| 1337 | |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1338 | void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif) |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1339 | { |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1340 | struct drbd_request *req = drbd_request_prepare(device, bio, start_jif); |
Lars Ellenberg | 6d9febe | 2013-03-19 18:16:50 +0100 | [diff] [blame] | 1341 | if (IS_ERR_OR_NULL(req)) |
| 1342 | return; |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1343 | drbd_send_and_submit(device, req); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1344 | } |
| 1345 | |
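| | /* Submit requests for which the activity log fastpath succeeds (or which need no AL transaction); anything that would have to block stays on the incoming list. */ |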
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1346 | static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1347 | { |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1348 | struct drbd_request *req, *tmp; |
| 1349 | list_for_each_entry_safe(req, tmp, incoming, tl_requests) { |
| 1350 | const int rw = bio_data_dir(req->master_bio); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1351 | |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1352 | if (rw == WRITE /* rw != WRITE should not even end up here! */ |
| 1353 | && req->private_bio && req->i.size |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1354 | && !test_bit(AL_SUSPENDED, &device->flags)) { |
| 1355 | if (!drbd_al_begin_io_fastpath(device, &req->i)) |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1356 | continue; |
| 1357 | |
| 1358 | req->rq_state |= RQ_IN_ACT_LOG; |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1359 | req->in_actlog_jif = jiffies; |
Lars Ellenberg | ad3fee7 | 2013-12-20 11:22:13 +0100 | [diff] [blame] | 1360 | atomic_dec(&device->ap_actlog_cnt); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1361 | } |
| 1362 | |
| 1363 | list_del_init(&req->tl_requests); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1364 | drbd_send_and_submit(device, req); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1365 | } |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1366 | } |
| 1367 | |
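| | /* Sort incoming requests into "pending" (AL slot reserved, to be submitted after the next transaction commit) and "later" (currently blocked, e.g. by resync); stop early once the pending transaction is full (-ENOBUFS). */ |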
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1368 | static bool prepare_al_transaction_nonblock(struct drbd_device *device, |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1369 | struct list_head *incoming, |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1370 | struct list_head *pending, |
| 1371 | struct list_head *later) |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1372 | { |
| 1373 | struct drbd_request *req, *tmp; |
| 1374 | int wake = 0; |
| 1375 | int err; |
| 1376 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1377 | spin_lock_irq(&device->al_lock); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1378 | list_for_each_entry_safe(req, tmp, incoming, tl_requests) { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1379 | err = drbd_al_begin_io_nonblock(device, &req->i); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1380 | if (err == -ENOBUFS) |
| 1381 | break; |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1382 | if (err == -EBUSY) |
| 1383 | wake = 1; |
| 1384 | if (err) |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1385 | list_move_tail(&req->tl_requests, later); |
| 1386 | else |
| 1387 | list_move_tail(&req->tl_requests, pending); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1388 | } |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1389 | spin_unlock_irq(&device->al_lock); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1390 | if (wake) |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1391 | wake_up(&device->al_wait); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1392 | return !list_empty(pending); |
| 1393 | } |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1394 | |
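| | /* After the AL transaction commit, the extents of these requests are hot: account them and send/submit. */ |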
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1395 | void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) |
| 1396 | { |
| 1397 | struct drbd_request *req, *tmp; |
| 1398 | |
| 1399 | list_for_each_entry_safe(req, tmp, pending, tl_requests) { |
| 1400 | req->rq_state |= RQ_IN_ACT_LOG; |
| 1401 | req->in_actlog_jif = jiffies; |
| 1402 | atomic_dec(&device->ap_actlog_cnt); |
| 1403 | list_del_init(&req->tl_requests); |
| 1404 | drbd_send_and_submit(device, req); |
| 1405 | } |
| 1406 | } |
| 1407 | |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1408 | void do_submit(struct work_struct *ws) |
| 1409 | { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1410 | struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1411 | LIST_HEAD(incoming); /* from drbd_make_request() */ |
| 1412 | LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */ |
| 1413 | LIST_HEAD(busy); /* blocked by resync requests */ |
| 1414 | |
| 1415 | /* grab new incoming requests */ |
| 1416 | spin_lock_irq(&device->resource->req_lock); |
| 1417 | list_splice_tail_init(&device->submit.writes, &incoming); |
| 1418 | spin_unlock_irq(&device->resource->req_lock); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1419 | |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1420 | for (;;) { |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1421 | DEFINE_WAIT(wait); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1422 | |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1423 | /* move used-to-be-busy back to front of incoming */ |
| 1424 | list_splice_init(&busy, &incoming); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1425 | submit_fast_path(device, &incoming); |
Lars Ellenberg | 08a1dda | 2013-03-19 18:16:56 +0100 | [diff] [blame] | 1426 | if (list_empty(&incoming)) |
| 1427 | break; |
| 1428 | |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1429 | for (;;) { |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1430 | prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE); |
| 1431 | |
| 1432 | list_splice_init(&busy, &incoming); |
| 1433 | prepare_al_transaction_nonblock(device, &incoming, &pending, &busy); |
| 1434 | if (!list_empty(&pending)) |
| 1435 | break; |
| 1436 | |
| 1437 | schedule(); |
| 1438 | |
| 1439 | /* If all currently "hot" activity log extents are kept busy by |
| 1440 | * incoming requests, we still must not totally starve new |
| 1441 | * requests to "cold" extents. |
| 1442 | * Something left on &incoming means there had not been |
| 1443 | * enough update slots available, and the activity log |
| 1444 | * has been marked as "starving". |
| 1445 | * |
| 1446 | * Try again now, without looking for new requests, |
| 1447 | * effectively blocking all new requests until we made |
| 1448 | * at least _some_ progress with what we currently have. |
| 1449 | */ |
| 1450 | if (!list_empty(&incoming)) |
| 1451 | continue; |
| 1452 | |
| 1453 | /* Nothing moved to pending, but nothing left |
| 1454 | * on incoming: all moved to busy! |
| 1455 | * Grab new and iterate. */ |
| 1456 | spin_lock_irq(&device->resource->req_lock); |
| 1457 | list_splice_tail_init(&device->submit.writes, &incoming); |
| 1458 | spin_unlock_irq(&device->resource->req_lock); |
| 1459 | } |
| 1460 | finish_wait(&device->al_wait, &wait); |
| 1461 | |
| 1462 | /* If the transaction was full, before all incoming requests |
| 1463 | * had been processed, skip ahead to commit, and iterate |
| 1464 | * without splicing in more incoming requests from upper layers. |
| 1465 | * |
| 1466 | * Else, if all incoming have been processed, |
| 1467 | * they have become either "pending" (to be submitted after |
| 1468 | * next transaction commit) or "busy" (blocked by resync). |
| 1469 | * |
| 1470 | * Maybe more was queued, while we prepared the transaction? |
| 1471 | * Try to stuff those into this transaction as well. |
| 1472 | * Be strictly non-blocking here, |
| 1473 | * we already have something to commit. |
| 1474 | * |
| 1475 | * Commit if we don't make any more progress. |
| 1476 | */ |
| 1477 | |
| 1478 | while (list_empty(&incoming)) { |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1479 | LIST_HEAD(more_pending); |
| 1480 | LIST_HEAD(more_incoming); |
| 1481 | bool made_progress; |
| 1482 | |
| 1483 | /* It is ok to look outside the lock, |
| 1484 | * it's only an optimization anyways */ |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1485 | if (list_empty(&device->submit.writes)) |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1486 | break; |
| 1487 | |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1488 | spin_lock_irq(&device->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1489 | list_splice_tail_init(&device->submit.writes, &more_incoming); |
Lars Ellenberg | 844a6ae7 | 2013-11-22 12:52:03 +0100 | [diff] [blame] | 1490 | spin_unlock_irq(&device->resource->req_lock); |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1491 | |
| 1492 | if (list_empty(&more_incoming)) |
| 1493 | break; |
| 1494 | |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1495 | made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy); |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1496 | |
| 1497 | list_splice_tail_init(&more_pending, &pending); |
| 1498 | list_splice_tail_init(&more_incoming, &incoming); |
Lars Ellenberg | 45ad07b | 2013-03-19 18:16:58 +0100 | [diff] [blame] | 1499 | if (!made_progress) |
| 1500 | break; |
| 1501 | } |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1502 | |
Lars Ellenberg | 4dd726f | 2014-02-11 11:15:36 +0100 | [diff] [blame] | 1503 | drbd_al_begin_io_commit(device); |
Lars Ellenberg | f5b90b6 | 2014-05-07 22:41:28 +0200 | [diff] [blame^] | 1504 | send_and_submit_pending(device, &pending); |
Lars Ellenberg | 113fef9 | 2013-03-22 18:14:40 -0600 | [diff] [blame] | 1505 | } |
| 1506 | } |
| 1507 | |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 1508 | void drbd_make_request(struct request_queue *q, struct bio *bio) |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1509 | { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1510 | struct drbd_device *device = (struct drbd_device *) q->queuedata; |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1511 | unsigned long start_jif; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1512 | |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1513 | start_jif = jiffies; |
Philipp Reisner | aeda1cd6 | 2010-11-09 17:45:06 +0100 | [diff] [blame] | 1514 | |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1515 | /* |
| 1516 | * what we "blindly" assume: |
| 1517 | */ |
Andreas Gruenbacher | 0b0ba1e | 2011-06-27 16:23:33 +0200 | [diff] [blame] | 1518 | D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1519 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1520 | inc_ap_bio(device); |
Lars Ellenberg | e5f891b | 2013-11-22 12:32:01 +0100 | [diff] [blame] | 1521 | __drbd_make_request(device, bio, start_jif); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1522 | } |
| 1523 | |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1524 | /* This is called by bio_add_page(). |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1525 | * |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1526 | * q->max_hw_sectors and other global limits are already enforced there. |
| 1527 | * |
| 1528 | * We need to call down to our lower level device, |
| 1529 | * in case it has special restrictions. |
| 1530 | * |
| 1531 | * We also may need to enforce configured max-bio-bvecs limits. |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1532 | * |
| 1533 | * As long as the BIO is empty we have to allow at least one bvec, |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1534 | * regardless of size and offset, so no need to ask lower levels. |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1535 | */ |
| 1536 | int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec) |
| 1537 | { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1538 | struct drbd_device *device = (struct drbd_device *) q->queuedata; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1539 | unsigned int bio_size = bvm->bi_size; |
Lars Ellenberg | 23361cf | 2011-03-31 16:36:43 +0200 | [diff] [blame] | 1540 | int limit = DRBD_MAX_BIO_SIZE; |
| 1541 | int backing_limit; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1542 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1543 | if (bio_size && get_ldev(device)) { |
Lars Ellenberg | 35f47ef | 2013-10-23 10:59:19 +0200 | [diff] [blame] | 1544 | unsigned int max_hw_sectors = queue_max_hw_sectors(q); |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1545 | struct request_queue * const b = |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1546 | device->ldev->backing_bdev->bd_disk->queue; |
Lars Ellenberg | a1c88d0 | 2010-05-14 19:16:41 +0200 | [diff] [blame] | 1547 | if (b->merge_bvec_fn) { |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1548 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); |
| 1549 | limit = min(limit, backing_limit); |
| 1550 | } |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1551 | put_ldev(device); |
Lars Ellenberg | 35f47ef | 2013-10-23 10:59:19 +0200 | [diff] [blame] | 1552 | if ((limit >> 9) > max_hw_sectors) |
| 1553 | limit = max_hw_sectors << 9; |
Philipp Reisner | b411b36 | 2009-09-25 16:07:19 -0700 | [diff] [blame] | 1554 | } |
| 1555 | return limit; |
| 1556 | } |
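/*
 * Illustrative sketch, not part of drbd_req.c: how a merge_bvec_fn such as
 * drbd_merge_bvec() above is typically wired into the request queue.  In DRBD
 * the queue limits are configured elsewhere; the helper name
 * drbd_sketch_setup_queue and its call site are assumptions for illustration,
 * but blk_queue_merge_bvec() is the standard hook on kernels of this vintage.
 * bio_add_page() then consults q->merge_bvec_fn for every bvec it tries to
 * append and refuses the page if the returned limit is too small.
 */
static void drbd_sketch_setup_queue(struct drbd_device *device,
				    struct request_queue *q)
{
	q->queuedata = device;
	/* global cap, enforced by the block layer before merge_bvec_fn runs */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
	/* per-bvec callback: applies backing device and max-bio-bvecs limits */
	blk_queue_merge_bvec(q, drbd_merge_bvec);
}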
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1557 | |
| 1558 | void request_timer_fn(unsigned long data) |
| 1559 | { |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1560 | struct drbd_device *device = (struct drbd_device *) data; |
Andreas Gruenbacher | a6b32bc | 2011-05-31 14:33:49 +0200 | [diff] [blame] | 1561 | struct drbd_connection *connection = first_peer_device(device)->connection; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1562 | struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */ |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1563 | struct net_conf *nc; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1564 | unsigned long oldest_submit_jif; |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1565 | unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1566 | unsigned long now; |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1567 | |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1568 | rcu_read_lock(); |
Andreas Gruenbacher | bde89a9 | 2011-05-30 16:32:41 +0200 | [diff] [blame] | 1569 | nc = rcu_dereference(connection->net_conf); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1570 | if (nc && device->state.conn >= C_WF_REPORT_PARAMS) |
Lars Ellenberg | 07be15b | 2012-05-07 11:53:08 +0200 | [diff] [blame] | 1571 | ent = nc->timeout * HZ/10 * nc->ko_count; |
Philipp Reisner | cdfda63 | 2011-07-05 15:38:59 +0200 | [diff] [blame] | 1572 | |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1573 | if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */ |
| 1574 | dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10; |
| 1575 | put_ldev(device); |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1576 | } |
Philipp Reisner | 44ed167 | 2011-04-19 17:10:19 +0200 | [diff] [blame] | 1577 | rcu_read_unlock(); |
| 1578 | |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1579 | et = min_not_zero(dt, ent); |
| 1580 | |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1581 | if (!et) |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1582 | return; /* Recurring timer stopped */ |
| 1583 | |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1584 | now = jiffies; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1585 | nt = now + et; |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1586 | |
Andreas Gruenbacher | 0500813 | 2011-07-07 14:19:42 +0200 | [diff] [blame] | 1587 | spin_lock_irq(&device->resource->req_lock); |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1588 | req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local); |
| 1589 | req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local); |
| 1590 | req_peer = connection->req_not_net_done; |
| 1591 | /* maybe the oldest request waiting for the peer is in fact still |
| 1592 | * blocking in TCP sendmsg */ |
| 1593 | if (!req_peer && connection->req_next && connection->req_next->pre_send_jif) |
| 1594 | req_peer = connection->req_next; |
| 1595 | |
| 1596 | /* evaluate the oldest peer request only in one timer! */ |
| 1597 | if (req_peer && req_peer->device != device) |
| 1598 | req_peer = NULL; |
| 1599 | |
| 1600 | /* do we have something to evaluate? */ |
| 1601 | if (req_peer == NULL && req_write == NULL && req_read == NULL) |
| 1602 | goto out; |
| 1603 | |
| 1604 | oldest_submit_jif = |
| 1605 | (req_write && req_read) |
| 1606 | ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif) |
| 1607 | ? req_write->pre_submit_jif : req_read->pre_submit_jif ) |
| 1608 | : req_write ? req_write->pre_submit_jif |
| 1609 | : req_read ? req_read->pre_submit_jif : now; |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1610 | |
Lars Ellenberg | ba280c0 | 2012-04-25 11:46:14 +0200 | [diff] [blame] | 1611 | /* The request is considered timed out, if |
| 1612 | * - we have some effective timeout from the configuration, |
| 1613 | * with above state restrictions applied, |
| 1614 | * - the oldest request is waiting for a response from the network |
| 1615 | * or from the local disk, respectively, |
| 1616 | * - the oldest request is in fact older than the effective timeout, |
| 1617 | * - the connection was established (or the disk was attached, respectively) |
| 1618 | * for longer than the timeout already. |
| 1619 | * Note that for 32bit jiffies and very stable connections/disks, |
| 1620 | * we may have a wrap-around, which is caught by |
| 1621 | * !time_in_range(now, last_..._jif, last_..._jif + timeout). |
| 1622 | * |
| 1623 | * Side effect: once per 32bit wrap-around interval, which means every |
| 1624 | * ~198 days with 250 HZ, we have a window where the timeout would need |
| 1625 | * to expire twice (worst case) to become effective. Good enough. |
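| | * (A simplified stand-alone sketch of this check follows the function below.) |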
| 1626 | */ |
Lars Ellenberg | 0853546 | 2014-04-28 18:43:31 +0200 | [diff] [blame] | 1627 | if (ent && req_peer && |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1628 | time_after(now, req_peer->pre_send_jif + ent) && |
Andreas Gruenbacher | bde89a9 | 2011-05-30 16:32:41 +0200 | [diff] [blame] | 1629 | !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1630 | drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1631 | _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1632 | } |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1633 | if (dt && oldest_submit_jif != now && |
| 1634 | time_after(now, oldest_submit_jif + dt) && |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1635 | !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { |
Andreas Gruenbacher | d018017 | 2011-07-03 17:53:52 +0200 | [diff] [blame] | 1636 | drbd_warn(device, "Local backing device failed to meet the disk-timeout\n"); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1637 | __drbd_chk_io_error(device, DRBD_FORCE_DETACH); |
Philipp Reisner | dfa8bed | 2011-06-29 14:06:08 +0200 | [diff] [blame] | 1638 | } |
Lars Ellenberg | 0853546 | 2014-04-28 18:43:31 +0200 | [diff] [blame] | 1639 | |
| 1640 | /* Reschedule the timer for the nearest timeout that has not expired yet. |
| 1641 | * Fall back to now + min(effective network timeout, disk timeout). */ |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1642 | ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent)) |
| 1643 | ? req_peer->pre_send_jif + ent : now + et; |
| 1644 | dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt)) |
| 1645 | ? oldest_submit_jif + dt : now + et; |
Lars Ellenberg | 0853546 | 2014-04-28 18:43:31 +0200 | [diff] [blame] | 1646 | nt = time_before(ent, dt) ? ent : dt; |
Lars Ellenberg | 7753a4c1 | 2013-11-22 13:00:12 +0100 | [diff] [blame] | 1647 | out: |
Andreas Gruenbacher | 0500813 | 2011-07-07 14:19:42 +0200 | [diff] [blame] | 1648 | spin_unlock_irq(&connection->resource->req_lock); |
Andreas Gruenbacher | b30ab79 | 2011-07-03 13:26:43 +0200 | [diff] [blame] | 1649 | mod_timer(&device->request_timer, nt); |
Philipp Reisner | 7fde2be | 2011-03-01 11:08:28 +0100 | [diff] [blame] | 1650 | } |
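/*
 * Simplified, stand-alone sketch of the timeout decision above; the helper
 * name drbd_sketch_request_timed_out is an assumption for illustration and
 * not part of the driver.  The configured "timeout" and "disk-timeout"
 * values are in units of 0.1 seconds, so the effective network timeout in
 * jiffies is nc->timeout * HZ/10 * nc->ko_count, and the disk timeout is
 * disk_conf->disk_timeout * HZ/10.  The !time_in_range() term requires that
 * the connection (or the attached disk) is itself older than the timeout,
 * which also keeps a 32bit jiffies wrap-around from triggering a spurious
 * timeout.
 */
static bool drbd_sketch_request_timed_out(unsigned long now,
					  unsigned long req_jif,   /* e.g. pre_send_jif or pre_submit_jif */
					  unsigned long since_jif, /* e.g. last_reconnect_jif or last_reattach_jif */
					  unsigned long timeout)   /* effective timeout in jiffies; 0 = disabled */
{
	if (!timeout)
		return false;
	return time_after(now, req_jif + timeout) &&
	       !time_in_range(now, since_jif, since_jif + timeout);
}

/*
 * With this helper, the two checks above correspond roughly to
 *   drbd_sketch_request_timed_out(now, req_peer->pre_send_jif,
 *                                 connection->last_reconnect_jif, ent)
 * for the peer, and, additionally guarded by oldest_submit_jif != now,
 *   drbd_sketch_request_timed_out(now, oldest_submit_jif,
 *                                 device->last_reattach_jif, dt)
 * for the local backing device.
 */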