/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

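/*
 * Free the per-hctx scheduler data for every hardware queue on @q,
 * invoking the scheduler's @exit hook (if any) before each kfree().
 */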
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);

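/*
 * Associate the current task's io_context with @rq: look up the io_cq
 * for this queue, creating one if none exists yet, and take a reference
 * on its io_context.
 */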
void blk_mq_sched_assign_ioc(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc;
	struct io_cq *icq;

	/*
	 * May not have an IO context if it's a passthrough request
	 */
	ioc = current->io_context;
	if (!ioc)
		return;

	spin_lock_irq(&q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(&q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}
	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

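/*
 * Rerun a hardware queue if it was marked for restart while busy; the
 * SCHED_RESTART bit is cleared first so that a new restart can be marked.
 */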
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	blk_mq_run_hw_queue(hctx, true);
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}

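/* Return the software queue following @ctx on @hctx, wrapping around. */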
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() fails to get the budget.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		if (!blk_mq_get_dispatch_budget(hctx))
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		}

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq won't be queued to the driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}

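/*
 * Main dispatch path for a hardware queue: requeued requests on the
 * hctx dispatch list take priority, then requests are pulled from the
 * scheduler or from the software queues.
 */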
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queue if the queue is busy */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}

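/*
 * Try to merge @bio with a request picked by the elevator. If a back or
 * front merge also collapses two requests, the now-redundant request is
 * returned through @merged_request for the caller to free.
 */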
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
			   struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		return merged;
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	enum hctx_type type = hctx->type;

	lockdep_assert_held(&ctx->lock);

	if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
		ctx->rq_merged++;
		return true;
	}

	return false;
}

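/*
 * Merge @bio via the scheduler's ->bio_merge hook when one is provided,
 * otherwise fall back to the default per-software-queue merge above.
 */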
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.bio_merge(hctx, bio);
	}

	type = hctx->type;
	if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
	    !list_empty_careful(&ctx->rq_lists[type])) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

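/*
 * Requests that are part of a flush sequence must bypass the scheduler
 * and go straight to the hctx dispatch list. Returns true if @rq was
 * inserted that way.
 */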
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       bool has_sched,
				       struct request *rq)
{
	/* dispatch flush rq directly */
	if (rq->rq_flags & RQF_FLUSH_SEQ) {
		spin_lock(&hctx->lock);
		list_add(&rq->queuelist, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		return true;
	}

	if (has_sched)
		rq->rq_flags |= RQF_SORTED;

	return false;
}

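/*
 * Insert @rq into the scheduler, or into the software queue when no
 * elevator is attached, optionally running the hardware queue afterwards.
 */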
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* a flush rq in the flush machinery needs to be dispatched directly */
	if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
		blk_insert_flush(rq);
		goto run;
	}

	WARN_ON(e && (rq->tag != -1));

	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
		goto run;

	if (e && e->type->ops.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

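/*
 * Insert a list of requests, e.g. when flushing a plug list. With no
 * elevator attached, an idle hardware queue lets the list be issued
 * directly to the driver.
 */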
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;

	e = hctx->queue->elevator;
	if (e && e->type->ops.insert_requests)
		e->type->ops.insert_requests(hctx, list, false);
	else {
		/*
		 * Try to issue requests directly if the hw queue isn't
		 * busy with the 'none' scheduler; this may save us one
		 * extra enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !e && !run_queue_async)
			blk_mq_try_issue_list_directly(hctx, list);
		else
			blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
}

static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}

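/*
 * Allocate the scheduler tag map and request pool for one hardware
 * queue, sized to q->nr_requests.
 */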
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}

static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}

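/*
 * Attach elevator @e to @q: size the scheduler tag sets, then run the
 * scheduler's init_sched and per-hctx init_hctx hooks. A NULL @e
 * switches the queue to no scheduler ("none").
 */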
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}

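/*
 * Tear down elevator @e on @q: run the per-hctx exit hooks first, then
 * exit the scheduler itself and free its tag sets.
 */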
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}