// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart. The queue is re-run once the
 * SCHED_RESTART condition is cleared by __blk_mq_sched_restart().
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

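/*
 * Clear the SCHED_RESTART flag and re-run the hardware queue so that
 * requests held back while the flag was set get another chance to be
 * dispatched.
 */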
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is the
	 * barrier in blk_mq_dispatch_rq_list(). Without it, the dispatch
	 * code could fail to see SCHED_RESTART while a new request added to
	 * hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

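/*
 * list_sort() comparison callback: order requests by their hardware queue
 * so that requests destined for the same hctx end up adjacent in the list.
 */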
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

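/*
 * Cut the leading run of requests sharing the first request's hctx off
 * @rq_list and dispatch them as one batch. Returns the result of
 * blk_mq_dispatch_rq_list() for that batch; the remaining requests stay on
 * @rq_list for the caller.
 */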
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3		/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq doesn't end up queued to the driver via
		 * .queue_rq() in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a driver tag for the request, stop
		 * dequeueing requests from the IO scheduler. We are unlikely
		 * to be able to submit them anyway, and it creates a false
		 * impression for the scheduling heuristics that the device
		 * can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Requests from different hctxs may be dequeued from some
		 * schedulers, such as bfq and deadline.
		 *
		 * Sort the requests in the list according to their hctx,
		 * then dispatch them in batches, one hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

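/*
 * Repeatedly call __blk_mq_do_dispatch_sched() for as long as it reports
 * that it dispatched something (return value 1), then propagate its final
 * return value (0 or -EAGAIN) to the caller.
 */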
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
	} while (ret == 1);

	return ret;
}

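/*
 * Return the software queue that follows @ctx on @hctx, wrapping around to
 * the first one. Used for round-robin dispatch from the sw queues.
 */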
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					   struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart the queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again.  This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget, which has to be released
		 * if this rq doesn't end up queued to the driver via
		 * .queue_rq() in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

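/*
 * Dispatch requests for one hardware queue: first drain anything left on
 * hctx->dispatch, then pull new work from the elevator or the software
 * queues as appropriate. Returns -EAGAIN if hctx->dispatch was found
 * non-empty, in which case the caller needs to run the queue again.
 */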
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	const bool has_sched = q->elevator;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue requests one by one from the sw queues if the queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

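/*
 * Dispatch pending requests for @hctx unless the queue is stopped or
 * quiesced. A single -EAGAIN retry is done inline; if the dispatch list is
 * still non-empty after that, the queue is re-run asynchronously.
 */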
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

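/*
 * Try to merge @bio into an already queued request, either through the
 * elevator's ->bio_merge() hook or, without a scheduler, against the
 * per-sw-queue list. Returns true if the bio was merged and no new request
 * is needed.
 */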
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}

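/*
 * Attempt to merge @rq with a request already queued in the elevator;
 * requests made redundant by the merge are collected on @free for the
 * caller to release.
 */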
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly.
	 * The device may get into a state where it can't handle FS requests,
	 * so BLK_STS_RESOURCE is always returned and the FS request ends up
	 * on hctx->dispatch; a passthrough request may then be required to
	 * get the device out of that state. If the passthrough request were
	 * added to the scheduler queue, there would be no chance to dispatch
	 * it, given that requests in hctx->dispatch are prioritized.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	return false;
}

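/*
 * Insert @rq for dispatch: flush and passthrough requests bypass the
 * scheduler and go straight to hctx->dispatch; otherwise the request is
 * handed to the elevator's ->insert_requests() or put on the sw queue.
 * Optionally run the hardware queue afterwards.
 */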
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, rq)) {
		/*
		 * Firstly, a normal IO request is inserted into the scheduler
		 * queue or a sw queue, while a flush request is added to the
		 * dispatch queue (hctx->dispatch) directly. Since there is at
		 * most one in-flight flush request per hw queue, it doesn't
		 * matter whether the flush request is added to the tail or
		 * the front of the dispatch queue.
		 *
		 * Secondly, in the NCQ case a flush request is a non-NCQ
		 * command, so queueing it fails while any normal IO request
		 * (NCQ command) is in flight. Adding the flush rq to the
		 * front of hctx->dispatch tends to add some latency to the
		 * flush rq because of S_SCHED_RESTART, compared with adding
		 * it to the tail; that in turn increases the chance of flush
		 * merging, so fewer flush requests are issued to the
		 * controller. About 10% of the run time of blktests block/004
		 * is observed to be saved on a disk attached via AHCI/NCQ
		 * when the flush rq is added to the front of hctx->dispatch.
		 *
		 * So simply queue the flush rq at the front of hctx->dispatch,
		 * which lets flush-intensive workloads benefit on NCQ HW.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

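/*
 * Insert a list of requests from a plug flush: hand them to the elevator
 * if one is attached, otherwise try direct issue (for the 'none' case with
 * an idle queue) and fall back to the sw queues, then run the hw queue.
 */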
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, and holds one usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e) {
		e->type->ops.insert_requests(hctx, list, false);
	} else {
		/*
		 * Try to issue requests directly if the hw queue isn't busy
		 * in the 'none' scheduler case; this may save us an extra
		 * enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !run_queue_async) {
			blk_mq_run_dispatch_ops(hctx->queue,
				blk_mq_try_issue_list_directly(hctx, list));
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
out:
	percpu_ref_put(&q->q_usage_counter);
}

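/*
 * Allocate the scheduler tags (and the requests backing them) for one
 * hardware queue, or reuse the queue-wide shared sched tags when the tag
 * set uses shared tags.
 */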
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

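/*
 * Set up the queue-wide shared scheduler tags. The map is sized to
 * MAX_SCHED_RQ up front so that later nr_requests updates don't require a
 * reallocation.
 */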
static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

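/*
 * Attach elevator @e to @q: pick a default queue depth, allocate scheduler
 * tags (shared or per-hctx), and call the elevator's ->init_sched() and
 * ->init_hctx() hooks. Passing e == NULL switches the queue to no
 * scheduler. On failure, everything allocated here is torn down again.
 */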
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int i, flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	int ret;

	if (!e) {
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue depth and
	 * BLKDEV_DEFAULT_RQ, since we don't split into sync/async like the
	 * old code did. Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

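/*
 * Tear down elevator @e on @q: run the ->exit_hctx() and ->exit_sched()
 * hooks, unregister the debugfs entries, and release the scheduler tags.
 */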
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}