/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

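/*
 * Upper bound on the tag depth an I/O scheduler may use: schedulers are
 * given a deeper request pool than the hardware queue, capped at 16
 * times the default queue depth.
 */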
#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)

void blk_mq_sched_assign_ioc(struct request *rq);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);

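/*
 * Restart dispatching on this hctx, but only if it was marked as
 * needing a restart; the inline test_bit() keeps the common (unmarked)
 * case cheap.
 */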
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		__blk_mq_sched_restart(hctx);
}

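/*
 * A bio is mergeable unless it carries one of the REQ_NOMERGE_FLAGS,
 * e.g. it is a flush/FUA request or was explicitly marked REQ_NOMERGE.
 */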
static inline bool bio_mergeable(struct bio *bio)
{
	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}

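/*
 * Ask the elevator whether @bio may be merged into @rq.  Only requests
 * that went through the scheduler (RQF_ELV) are subject to the
 * elevator's allow_merge hook; all other requests may merge freely.
 */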
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.allow_merge)
			return e->type->ops.allow_merge(q, rq, bio);
	}
	return true;
}

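/*
 * Notify the elevator that @rq has completed, passing the completion
 * time for schedulers that track per-request latency.
 */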
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = rq->q->elevator;

		if (e->type->ops.completed_request)
			e->type->ops.completed_request(rq, now);
	}
}

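/*
 * Give the elevator a chance to act on a request being requeued.  Only
 * requests that hold scheduler-private data (RQF_ELVPRIV) are handed to
 * the requeue_request hook.
 */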
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ELV) {
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)
			e->type->ops.requeue_request(rq);
	}
}

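/* Does the attached elevator have requests ready to dispatch? */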
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

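/*
 * True if this hctx was marked for a restart, i.e. dispatching stopped
 * early because a resource such as a tag or driver budget ran out.
 */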
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif