/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct dm_target *ti;
        struct request *orig, *clone;
        struct kthread_work work;
        blk_status_t error;
        union map_info info;
        struct dm_stats_aux stats_aux;
        unsigned long duration_jiffies;
        unsigned n_sectors;
        unsigned completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
        return __dm_get_module_param(&reserved_rq_based_ios,
                                     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
        return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
        return __dm_get_module_param(&dm_mq_queue_depth,
                                     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
        return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
        blk_mq_unquiesce_queue(q);
        blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
        blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
        struct dm_rq_clone_bio_info *info =
                container_of(clone, struct dm_rq_clone_bio_info, clone);
        struct dm_rq_target_io *tio = info->tio;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
        blk_status_t error = clone->bi_status;
        bool is_last = !clone->bi_next;

        bio_put(clone);

        if (tio->error)
                /*
                 * An error has already been detected on the request.
                 * Once an error has occurred, just let clone->end_io()
                 * handle the remainder.
                 */
                return;
        else if (error) {
                /*
                 * Don't report the error to the upper layer yet.
                 * The error handling decision is made by the target driver
                 * when the request is completed.
                 */
                tio->error = error;
                goto exit;
        }

        /*
         * I/O for the bio successfully completed.
         * Report the data completion to the upper layer.
         */
        tio->completed += nr_bytes;

        /*
         * Update the original request.
         * Do not use blk_mq_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        if (is_last)
 exit:
                blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

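/*
 * With blk-mq, the dm_rq_target_io lives in the request's per-driver data
 * (PDU), so it can be recovered directly from the request itself.
 */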
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
        return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies - tio->duration_jiffies;
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, true,
                                    tio->duration_jiffies, &tio->stats_aux);
        }
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
        /*
         * dm_put() must be at the end of this function. See the comment above
         */
        dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        blk_rq_unprep_clone(clone);
        tio->ti->type->release_clone_rq(clone, NULL);

        rq_end_stats(md, rq);
        blk_mq_end_request(rq, error);
        rq_completed(md);
}

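/*
 * Kick the queue's requeue list so that any requeued requests are
 * re-dispatched, optionally after a delay.
 */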
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
        blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
        __dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
        blk_mq_requeue_request(rq, false);
        __dm_mq_kick_requeue_list(rq->q, msecs);
}

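/*
 * Undo any clone preparation and hand the original request back to blk-mq
 * for requeue.  A delayed requeue is used when the caller wants to back off
 * briefly instead of retrying immediately.
 */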
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        unsigned long delay_ms = delay_requeue ? 100 : 0;

        rq_end_stats(md, rq);
        if (tio->clone) {
                blk_rq_unprep_clone(tio->clone);
                tio->ti->type->release_clone_rq(tio->clone, NULL);
        }

        dm_mq_delay_requeue_request(rq, delay_ms);
        rq_completed(md);
}

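/*
 * Act on the completion of a mapped clone: give the target's rq_end_io hook
 * (if any) the first say, then complete, requeue or delay-requeue the
 * original request according to the DM_ENDIO_* verdict.
 */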
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
        int r = DM_ENDIO_DONE;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = NULL;

        if (tio->ti) {
                rq_end_io = tio->ti->type->rq_end_io;

                if (mapped && rq_end_io)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }

        if (unlikely(error == BLK_STS_TARGET)) {
                if (req_op(clone) == REQ_OP_DISCARD &&
                    !clone->q->limits.max_discard_sectors)
                        disable_discard(tio->md);
                else if (req_op(clone) == REQ_OP_WRITE_SAME &&
                         !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
                else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
                         !clone->q->limits.max_write_zeroes_sectors)
                        disable_write_zeroes(tio->md);
        }

        switch (r) {
        case DM_ENDIO_DONE:
                /* The target wants to complete the I/O */
                dm_end_request(clone, error);
                break;
        case DM_ENDIO_INCOMPLETE:
                /* The target will handle the I/O */
                return;
        case DM_ENDIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_original_request(tio, false);
                break;
        case DM_ENDIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        default:
                DMWARN("unimplemented target endio return value: %d", r);
                BUG();
        }
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        bool mapped = true;
        struct dm_rq_target_io *tio = tio_from_request(rq);
        struct request *clone = tio->clone;

        if (!clone) {
                struct mapped_device *md = tio->md;

                rq_end_stats(md, rq);
                blk_mq_end_request(rq, tio->error);
                rq_completed(md);
                return;
        }

        if (rq->rq_flags & RQF_FAILED)
                mapped = false;

        dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
        struct dm_rq_target_io *tio = tio_from_request(rq);

        tio->error = error;
        if (likely(!blk_should_fake_timeout(rq->q)))
                blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
        rq->rq_flags |= RQF_FAILED;
        dm_complete_request(rq, error);
}

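/*
 * ->end_io callback of the clone: hand completion of the original request
 * over to the softirq completion path above.
 */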
static void end_clone_request(struct request *clone, blk_status_t error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        dm_complete_request(tio->orig, error);
}

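/*
 * Insert the prepared clone into the queue of the underlying device.
 * Resource-shortage statuses are returned to the caller for requeue; any
 * other failure completes the original request with that error.
 */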
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
        blk_status_t r;

        if (blk_queue_io_stat(clone->q))
                clone->rq_flags |= RQF_IO_STAT;

        clone->start_time_ns = ktime_get_ns();
        r = blk_insert_cloned_request(clone->q, clone);
        if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
                /* must complete clone in terms of original request */
                dm_complete_request(rq, r);
        return r;
}

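/*
 * Bio constructor used by blk_rq_prep_clone(): link each cloned bio back to
 * its original bio and owning tio so end_clone_bio() can account partial
 * completions against the original request.
 */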
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct dm_rq_clone_bio_info *info =
                container_of(bio, struct dm_rq_clone_bio_info, clone);

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;

        return 0;
}

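/*
 * Prepare the clone: copy the original request's bios with
 * blk_rq_prep_clone() and wire up the clone's completion so that it feeds
 * back into end_clone_request().
 */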
static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
        int r;

        r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
                              dm_rq_bio_constructor, tio);
        if (r)
                return r;

        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        tio->clone = clone;

        return 0;
}

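/*
 * Reset the per-request tio state before each dispatch.
 */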
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
                     struct mapped_device *md)
{
        tio->md = md;
        tio->ti = NULL;
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
        tio->completed = 0;
        /*
         * Avoid initializing info for blk-mq; it passes
         * target-specific data through info.ptr
         * (see: dm_mq_init_request)
         */
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
        int r;
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request *clone = NULL;
        blk_status_t ret;

        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone, &tio->info);
                        return DM_MAPIO_REQUEUE;
                }

                /* The target has remapped the I/O so dispatch it */
                trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
                                     blk_rq_pos(rq));
                ret = dm_dispatch_clone_request(clone, rq);
                if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
                        blk_rq_unprep_clone(clone);
                        blk_mq_cleanup_rq(clone);
                        tio->ti->type->release_clone_rq(clone, &tio->info);
                        tio->clone = NULL;
                        return DM_MAPIO_REQUEUE;
                }
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                break;
        case DM_MAPIO_DELAY_REQUEUE:
                /* The target wants to requeue the I/O after a delay */
                dm_requeue_original_request(tio, true);
                break;
        case DM_MAPIO_KILL:
                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(rq, BLK_STS_IOERR);
                break;
        default:
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }

        return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
        return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
                                                     const char *buf, size_t count)
{
        return count;
}

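/*
 * Book-keeping before handing a request to the target: tell blk-mq the
 * request has started, record dm-stats if enabled, and take a reference on
 * the md that is only dropped again in rq_completed().
 */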
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
        blk_mq_start_request(orig);

        if (unlikely(dm_stats_used(&md->stats))) {
                struct dm_rq_target_io *tio = tio_from_request(orig);
                tio->duration_jiffies = jiffies;
                tio->n_sectors = blk_rq_sectors(orig);
                dm_stats_account_io(&md->stats, rq_data_dir(orig),
                                    blk_rq_pos(orig), tio->n_sectors, false, 0,
                                    &tio->stats_aux);
        }

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count held by the device opener,
         * because the device may be closed during request completion,
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);
}

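/*
 * .init_request hook: runs once per pre-allocated request when the tag set
 * is set up, not on every I/O, so only state that survives across dispatches
 * belongs here.
 */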
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
                              unsigned int hctx_idx, unsigned int numa_node)
{
        struct mapped_device *md = set->driver_data;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

        /*
         * Must initialize md member of tio, otherwise it won't
         * be available in dm_mq_queue_rq.
         */
        tio->md = md;

        if (md->init_tio_pdu) {
                /* target-specific per-io data is immediately after the tio */
                tio->info.ptr = tio + 1;
        }

        return 0;
}

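/*
 * .queue_rq hook: map one request and dispatch it.  Returning
 * BLK_STS_RESOURCE asks blk-mq to hold on to the request and retry later.
 */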
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
        struct dm_target *ti = md->immutable_target;

        /*
         * blk-mq's unquiesce may come from outside events, such as
         * elevator switch, updating nr_requests or others, and request may
         * come during suspend, so simply ask for blk-mq to requeue it.
         */
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
                return BLK_STS_RESOURCE;

        if (unlikely(!ti)) {
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);

                ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
        }

        if (ti->type->busy && ti->type->busy(ti))
                return BLK_STS_RESOURCE;

        dm_start_request(md, rq);

        /* Init tio using md established in .init_request */
        init_tio(tio, rq, md);

        /*
         * Establish tio->ti before calling map_request().
         */
        tio->ti = ti;

        /* Direct call is fine since .queue_rq allows allocations */
        if (map_request(tio) == DM_MAPIO_REQUEUE) {
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md);
                return BLK_STS_RESOURCE;
        }

        return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
        .queue_rq = dm_mq_queue_rq,
        .complete = dm_softirq_done,
        .init_request = dm_mq_init_request,
};

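/*
 * Allocate and register the blk-mq tag set for a request-based mapped device
 * and attach it to the device's already-allocated queue.  cmd_size reserves
 * per-request room for the tio plus, if the table has an immutable target,
 * that target's per-io data.
 */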
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
        struct dm_target *immutable_tgt;
        int err;

        md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
        if (!md->tag_set)
                return -ENOMEM;

        md->tag_set->ops = &dm_mq_ops;
        md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
        md->tag_set->numa_node = md->numa_node_id;
        md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
        md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
        md->tag_set->driver_data = md;

        md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
        immutable_tgt = dm_table_get_immutable_target(t);
        if (immutable_tgt && immutable_tgt->per_io_data_size) {
                /* any target-specific per-io data is immediately after the tio */
                md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
                md->init_tio_pdu = true;
        }

        err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
                goto out_kfree_tag_set;

        err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (err)
                goto out_tag_set;
        return 0;

out_tag_set:
        blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
        kfree(md->tag_set);
        md->tag_set = NULL;

        return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
        if (md->tag_set) {
                blk_mq_free_tag_set(md->tag_set);
                kfree(md->tag_set);
                md->tag_set = NULL;
        }
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");