/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>

#include "blk-mq-debugfs.h"

struct blk_mq_debugfs_attr;

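/*
 * Identity of a registered rq_qos policy: wbt (writeback throttling), the
 * blk-iolatency cgroup controller, and the blk-iocost cgroup controller.
 */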
enum rq_qos_id {
	RQ_QOS_WBT,
	RQ_QOS_LATENCY,
	RQ_QOS_COST,
};

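/*
 * A wait queue paired with an in-flight counter, used by policies that need
 * to block submitters once their in-flight limit has been reached.
 */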
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

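/*
 * Per-policy instance, linked into the request_queue's singly linked
 * ->rq_qos list and usually embedded in the policy's own private state.
 */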
struct rq_qos {
	struct rq_qos_ops *ops;
	struct request_queue *q;
	enum rq_qos_id id;
	struct rq_qos *next;
#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry *debugfs_dir;
#endif
};

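/*
 * Hooks a policy may implement: throttle() runs at bio submission time and
 * may sleep, track() and merge() associate a bio with a request, issue(),
 * requeue() and done() follow the request life cycle, and cleanup() and
 * done_bio() handle bios that never got a request or that have completed.
 */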
struct rq_qos_ops {
	void (*throttle)(struct rq_qos *, struct bio *);
	void (*track)(struct rq_qos *, struct request *, struct bio *);
	void (*merge)(struct rq_qos *, struct request *, struct bio *);
	void (*issue)(struct rq_qos *, struct request *);
	void (*requeue)(struct rq_qos *, struct request *);
	void (*done)(struct rq_qos *, struct request *);
	void (*done_bio)(struct rq_qos *, struct bio *);
	void (*cleanup)(struct rq_qos *, struct bio *);
	void (*queue_depth_changed)(struct rq_qos *);
	void (*exit)(struct rq_qos *);
	const struct blk_mq_debugfs_attr *debugfs_attrs;
};

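/*
 * Queue-depth scaling state for policies that throttle by limiting the
 * number of in-flight requests (see rq_depth_scale_up() and
 * rq_depth_scale_down() below).
 */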
struct rq_depth {
	unsigned int max_depth;

	int scale_step;
	bool scaled_max;

	unsigned int queue_depth;
	unsigned int default_depth;
};

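/* Find the policy with the given id on @q, or NULL if it is not attached. */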
static inline struct rq_qos *rq_qos_id(struct request_queue *q,
					enum rq_qos_id id)
{
	struct rq_qos *rqos;
	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->id == id)
			break;
	}
	return rqos;
}

static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_WBT);
}

static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
{
	return rq_qos_id(q, RQ_QOS_LATENCY);
}

static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	}
	return "unknown";
}

static inline void rq_wait_init(struct rq_wait *rq_wait)
{
	atomic_set(&rq_wait->inflight, 0);
	init_waitqueue_head(&rq_wait->wait);
}

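/*
 * Attach @rqos to @q. A policy typically embeds struct rq_qos in its own
 * state, fills in ->ops, ->q and ->id, and registers it while setting up
 * the queue. A rough sketch (the my_* names are hypothetical):
 *
 *	struct my_qos {
 *		struct rq_qos rqos;
 *		struct rq_wait wait;
 *	};
 *
 *	my->rqos.ops = &my_qos_ops;
 *	my->rqos.q = q;
 *	my->rqos.id = RQ_QOS_WBT;
 *	rq_qos_add(q, &my->rqos);
 */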
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	/*
	 * No IO can be in flight when adding rqos, so freeze the queue,
	 * which is fine since we only support rq_qos for blk-mq queues.
	 *
	 * Reuse ->queue_lock to protect against other concurrent rq_qos
	 * additions/deletions.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	if (rqos->ops->debugfs_attrs)
		blk_mq_debugfs_register_rqos(rqos);
}

static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	/*
	 * See the comment in rq_qos_add() about freezing the queue and
	 * using ->queue_lock.
	 */
	blk_mq_freeze_queue(q);

	spin_lock_irq(&q->queue_lock);
	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
	spin_unlock_irq(&q->queue_lock);

	blk_mq_unfreeze_queue(q);

	blk_mq_debugfs_unregister_rqos(rqos);
}

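/*
 * Callbacks for rq_qos_wait(): acquire_inflight_cb_t tries to take an
 * in-flight slot on @rqw and returns true on success; cleanup_cb_t gives
 * back an extra slot if the waiter ends up with one it does not need.
 */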
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
bool rq_depth_scale_up(struct rq_depth *rqd);
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

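/*
 * Out-of-line helpers (blk-rq-qos.c): each walks the ->rq_qos list and
 * invokes the matching hook on every policy that implements it.
 */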
void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
void __rq_qos_queue_depth_changed(struct rq_qos *rqos);

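/*
 * Wrappers used by the rest of the block layer. The q->rq_qos check keeps
 * the fast path to a single pointer test when no policy is attached.
 */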
static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_cleanup(q->rq_qos, bio);
}

static inline void rq_qos_done(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_done(q->rq_qos, rq);
}

static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_issue(q->rq_qos, rq);
}

static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	if (q->rq_qos)
		__rq_qos_requeue(q->rq_qos, rq);
}

static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_done_bio(q->rq_qos, bio);
}

static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
	/*
	 * BIO_TRACKED lets controllers know that a bio went through the
	 * normal rq_qos path.
	 */
	bio_set_flag(bio, BIO_TRACKED);
	if (q->rq_qos)
		__rq_qos_throttle(q->rq_qos, bio);
}

static inline void rq_qos_track(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_track(q->rq_qos, rq, bio);
}

static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
				struct bio *bio)
{
	if (q->rq_qos)
		__rq_qos_merge(q->rq_qos, rq, bio);
}

static inline void rq_qos_queue_depth_changed(struct request_queue *q)
{
	if (q->rq_qos)
		__rq_qos_queue_depth_changed(q->rq_qos);
}

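/* Tear down and exit() every policy still attached to the queue. */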
void rq_qos_exit(struct request_queue *);

#endif