// SPDX-License-Identifier: GPL-2.0
/*
 *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 *  for the blk-mq scheduling framework
 *
 *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
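
/*
 * All of the above can be adjusted at run time through the scheduler's sysfs
 * attributes, defined near the bottom of this file. For example (the device
 * name is only an example; the expiry values are read and written in
 * milliseconds):
 *
 *	echo 100 > /sys/block/sda/queue/iosched/read_expire
 *	echo 32  > /sys/block/sda/queue/iosched/fifo_batch
 */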

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
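	/*
	 * The sort_list rbtrees keep requests in sector order, which is what
	 * dispatch follows while batching; the fifo_lists keep them in
	 * arrival order, so an expired request is always found at the head.
	 */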
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * Next request in sort order. Read, write or both may be NULL.
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	spinlock_t zone_lock;
	struct list_head dispatch;
};

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	rq = dd->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * __dd_dispatch_request() selects the best request according to
 * read/write expire, fifo_batch, etc
 */
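/*
 * In outline, the selection below proceeds as follows:
 *
 *  1. Requests parked on dd->dispatch (at-head or passthrough inserts) are
 *     issued first.
 *  2. If a batch is in progress and has not yet reached fifo_batch requests,
 *     keep following the sector-sorted order in the current direction.
 *  3. Otherwise pick a direction: reads are preferred unless writes have
 *     already been starved writes_starved times.
 *  4. Within that direction, restart from the FIFO head if a deadline has
 *     expired (or there is no next sorted request), else continue from the
 *     next request in sector order.
 */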
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
374
Jens Axboeca11f202018-01-06 09:23:11 -0700375/*
376 * One confusing aspect here is that we get called for a specific
Damien Le Moal7211aef82018-12-17 15:14:05 +0900377 * hardware queue, but we may return a request that is for a
Jens Axboeca11f202018-01-06 09:23:11 -0700378 * different hardware queue. This is because mq-deadline has shared
379 * state for all hardware queues, in terms of sorting, FIFOs, etc.
380 */
Jens Axboec13660a2017-01-26 12:40:07 -0700381static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
Jens Axboe945ffb62017-01-14 17:11:11 -0700382{
383 struct deadline_data *dd = hctx->queue->elevator->elevator_data;
Jens Axboec13660a2017-01-26 12:40:07 -0700384 struct request *rq;
Jens Axboe945ffb62017-01-14 17:11:11 -0700385
386 spin_lock(&dd->lock);
Jens Axboeca11f202018-01-06 09:23:11 -0700387 rq = __dd_dispatch_request(dd);
Jens Axboe945ffb62017-01-14 17:11:11 -0700388 spin_unlock(&dd->lock);
Kashyap Desaib4455472020-08-19 23:20:28 +0800389 if (rq)
390 atomic_dec(&rq->mq_hctx->elevator_queued);
Jens Axboec13660a2017-01-26 12:40:07 -0700391
392 return rq;
Jens Axboe945ffb62017-01-14 17:11:11 -0700393}

static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}

static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
		unsigned int nr_segs)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	blk_mq_sched_request_inserted(rq);

	if (at_head || blk_rq_is_passthrough(rq)) {
		if (at_head)
			list_add(&rq->queuelist, &dd->dispatch);
		else
			list_add_tail(&rq->queuelist, &dd->dispatch);
	} else {
		deadline_add_rq_rb(dd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}
}

static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
		atomic_inc(&hctx->elevator_queued);
	}
	spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the .finish_request
 * method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&dd->fifo_list[WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	if (!atomic_read(&hctx->elevator_queued))
		return false;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION
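
/*
 * As an illustration, SHOW_FUNCTION(deadline_read_expire_show,
 * dd->fifo_expire[READ], 1) above expands to roughly:
 *
 *	static ssize_t deadline_read_expire_show(struct elevator_queue *e,
 *						 char *page)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *		int __data = dd->fifo_expire[READ];
 *
 *		__data = jiffies_to_msecs(__data);	(since __CONV == 1)
 *		return deadline_var_show(__data, page);
 *	}
 *
 * i.e. the expiry attributes are reported in milliseconds, not jiffies.
 */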

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)				\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct request *rq = dd->next_rq[ddir];				\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start	= deadline_dispatch_start,
	.next	= deadline_dispatch_next,
	.stop	= deadline_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)						\
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
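
/*
 * With CONFIG_BLK_DEBUG_FS enabled, the attributes above are exposed through
 * the block layer's per-queue debugfs directory, typically (paths shown only
 * as an example, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	/sys/kernel/debug/block/<disk>/sched/read_fifo_list
 *	/sys/kernel/debug/block/<disk>/sched/batching
 *	/sys/kernel/debug/block/<disk>/sched/dispatch
 */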

static struct elevator_type mq_deadline = {
	.ops = {
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_queue,
		.exit_sched		= dd_exit_queue,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");
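
/*
 * The scheduler is selected per device at run time through sysfs, e.g. (the
 * device name is only an example):
 *
 *	echo mq-deadline > /sys/block/sda/queue/scheduler
 *
 * The "deadline" elevator_alias above lets configurations that ask for the
 * legacy deadline scheduler pick up mq-deadline on blk-mq devices.
 */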

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");