// SPDX-License-Identifier: GPL-2.0
/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kyber.h>

/*
 * Scheduling domains: the device is divided into multiple domains based on the
 * request type.
 */
enum {
	KYBER_READ,
	KYBER_WRITE,
	KYBER_DISCARD,
	KYBER_OTHER,
	KYBER_NUM_DOMAINS,
};

static const char *kyber_domain_names[] = {
	[KYBER_READ] = "READ",
	[KYBER_WRITE] = "WRITE",
	[KYBER_DISCARD] = "DISCARD",
	[KYBER_OTHER] = "OTHER",
};

enum {
	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Maximum device-wide depth for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate the
 * device with only a fraction of the maximum possible queue depth. So, we cap
 * these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_WRITE] = 128,
	[KYBER_DISCARD] = 64,
	[KYBER_OTHER] = 16,
};

/*
 * Default latency targets for each scheduling domain.
 */
static const u64 kyber_latency_targets[] = {
	[KYBER_READ] = 2ULL * NSEC_PER_MSEC,
	[KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
	[KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
};

/*
 * Batch size (number of requests we'll dispatch in a row) for each scheduling
 * domain.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_WRITE] = 8,
	[KYBER_DISCARD] = 1,
	[KYBER_OTHER] = 1,
};

/*
 * Request latencies are recorded in a histogram with buckets defined relative
 * to the target latency:
 *
 * <= 1/4 * target latency
 * <= 1/2 * target latency
 * <= 3/4 * target latency
 * <= target latency
 * <= 1 1/4 * target latency
 * <= 1 1/2 * target latency
 * <= 1 3/4 * target latency
 * > 1 3/4 * target latency
 */
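/*
 * For example, with the default 2 ms read target the bucket boundaries fall at
 * 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, and 3.5 ms, with a final bucket for anything
 * slower than that.
 */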
enum {
	/*
	 * The width of the latency histogram buckets is
	 * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
	 */
	KYBER_LATENCY_SHIFT = 2,
	/*
	 * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
	 * thus, "good".
	 */
	KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
	/* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
	KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
};

/*
 * We measure both the total latency and the I/O latency (i.e., latency after
 * submitting to the device).
 */
enum {
	KYBER_TOTAL_LATENCY,
	KYBER_IO_LATENCY,
};

static const char *kyber_latency_type_names[] = {
	[KYBER_TOTAL_LATENCY] = "total",
	[KYBER_IO_LATENCY] = "I/O",
};

/*
 * Per-cpu latency histograms: total latency and I/O latency for each scheduling
 * domain except for KYBER_OTHER.
 */
struct kyber_cpu_latency {
	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};

/*
 * There is a one-to-one mapping between ctx & hctx and between kcq & khd, so we
 * use request->mq_ctx->index_hw to index the kcq in khd.
 */
struct kyber_ctx_queue {
	/*
	 * Used to ensure that operations on rq_list and kcq_map are atomic.
	 * It also protects the requests on rq_list during merging.
	 */
	spinlock_t lock;
	struct list_head rq_list[KYBER_NUM_DOMAINS];
} ____cacheline_aligned_in_smp;

struct kyber_queue_data {
	struct request_queue *q;

	/*
	 * Each scheduling domain has a limited number of in-flight requests
	 * device-wide, limited by these tokens.
	 */
	struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];

	/*
	 * Async request percentage, converted to per-word depth for
	 * sbitmap_get_shallow().
	 */
	unsigned int async_depth;

	struct kyber_cpu_latency __percpu *cpu_latency;

	/* Timer for stats aggregation and adjusting domain tokens. */
	struct timer_list timer;

	unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];

	unsigned long latency_timeout[KYBER_OTHER];

	int domain_p99[KYBER_OTHER];

	/* Target latencies in nanoseconds. */
	u64 latency_targets[KYBER_OTHER];
};

struct kyber_hctx_data {
	spinlock_t lock;
	struct list_head rqs[KYBER_NUM_DOMAINS];
	unsigned int cur_domain;
	unsigned int batching;
	struct kyber_ctx_queue *kcqs;
	struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
	struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
	atomic_t wait_index[KYBER_NUM_DOMAINS];
};

static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static unsigned int kyber_sched_domain(unsigned int op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_READ:
		return KYBER_READ;
	case REQ_OP_WRITE:
		return KYBER_WRITE;
	case REQ_OP_DISCARD:
		return KYBER_DISCARD;
	default:
		return KYBER_OTHER;
	}
}

static void flush_latency_buckets(struct kyber_queue_data *kqd,
				  struct kyber_cpu_latency *cpu_latency,
				  unsigned int sched_domain, unsigned int type)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
	unsigned int bucket;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}

/*
 * Calculate the histogram bucket with the given percentile rank, or -1 if there
 * aren't enough samples yet.
 */
static int calculate_percentile(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int type,
				unsigned int percentile)
{
	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
	unsigned int bucket, samples = 0, percentile_samples;

	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
		samples += buckets[bucket];

	if (!samples)
		return -1;

	/*
	 * We do the calculation once we have 500 samples or one second passes
	 * since the first sample was recorded, whichever comes first.
	 */
	if (!kqd->latency_timeout[sched_domain])
		kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
	if (samples < 500 &&
	    time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
		return -1;
	}
	kqd->latency_timeout[sched_domain] = 0;

	percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
		if (buckets[bucket] >= percentile_samples)
			break;
		percentile_samples -= buckets[bucket];
	}
	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));

	trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
			    kyber_latency_type_names[type], percentile,
			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);

	return bucket;
}

static void kyber_resize_domain(struct kyber_queue_data *kqd,
				unsigned int sched_domain, unsigned int depth)
{
	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
		trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
				   depth);
	}
}

static void kyber_timer_fn(struct timer_list *t)
{
	struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
	unsigned int sched_domain;
	int cpu;
	bool bad = false;

	/* Sum all of the per-cpu latency histograms. */
	for_each_online_cpu(cpu) {
		struct kyber_cpu_latency *cpu_latency;

		cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
		for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_TOTAL_LATENCY);
			flush_latency_buckets(kqd, cpu_latency, sched_domain,
					      KYBER_IO_LATENCY);
		}
	}

	/*
	 * Check if any domains have a high I/O latency, which might indicate
	 * congestion in the device. Note that we use the p90; we don't want to
	 * be too sensitive to outliers here.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		int p90;

		p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
					   90);
		if (p90 >= KYBER_GOOD_BUCKETS)
			bad = true;
	}

	/*
	 * Adjust the scheduling domain depths. If we determined that there was
	 * congestion, we throttle all domains with good latencies. Either way,
	 * we ease up on throttling domains with bad latencies.
	 */
	for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
		unsigned int orig_depth, depth;
		int p99;

		p99 = calculate_percentile(kqd, sched_domain,
					   KYBER_TOTAL_LATENCY, 99);
		/*
		 * This is kind of subtle: different domains will not
		 * necessarily have enough samples to calculate the latency
		 * percentiles during the same window, so we have to remember
		 * the p99 for the next time we observe congestion; once we do,
		 * we don't want to throttle again until we get more data, so we
		 * reset it to -1.
		 */
		if (bad) {
			if (p99 < 0)
				p99 = kqd->domain_p99[sched_domain];
			kqd->domain_p99[sched_domain] = -1;
		} else if (p99 >= 0) {
			kqd->domain_p99[sched_domain] = p99;
		}
		if (p99 < 0)
			continue;

		/*
		 * If this domain has bad latency, throttle less. Otherwise,
		 * throttle more iff we determined that there is congestion.
		 *
		 * The new depth is scaled linearly with the p99 latency vs the
		 * latency target. E.g., if the p99 is 3/4 of the target, then
		 * we throttle down to 3/4 of the current depth, and if the p99
		 * is 2x the target, then we double the depth.
		 */
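		/*
		 * Since p99 is a bucket index, the scale factor below works out
		 * to (p99 + 1) / (1 << KYBER_LATENCY_SHIFT): bucket 2 ("<= 3/4
		 * of the target") shrinks the depth to 3/4, and bucket 7 ("> 1
		 * 3/4 of the target") doubles it.
		 */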
		if (bad || p99 >= KYBER_GOOD_BUCKETS) {
			orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
			depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
			kyber_resize_domain(kqd, sched_domain, depth);
		}
	}
}

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	int ret = -ENOMEM;
	int i;

	kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;

	kqd->q = q;

	kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
					    GFP_KERNEL | __GFP_ZERO);
	if (!kqd->cpu_latency)
		goto err_kqd;

	timer_setup(&kqd->timer, kyber_timer_fn, 0);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_buckets;
		}
	}

	for (i = 0; i < KYBER_OTHER; i++) {
		kqd->domain_p99[i] = -1;
		kqd->latency_targets[i] = kyber_latency_targets[i];
	}

	return kqd;

err_buckets:
	free_percpu(kqd->cpu_latency);
err_kqd:
	kfree(kqd);
err:
	return ERR_PTR(ret);
}

static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	blk_stat_enable_accounting(q);

	eq->elevator_data = kqd;
	q->elevator = eq;

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	int i;

	del_timer_sync(&kqd->timer);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	free_percpu(kqd->cpu_latency);
	kfree(kqd);
}

static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
{
	unsigned int i;

	spin_lock_init(&kcq->lock);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		INIT_LIST_HEAD(&kcq->rq_list[i]);
}

static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int shift = tags->bitmap_tags->sb.shift;

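	/*
	 * For example, with 64-bit sbitmap words (shift == 6), async requests
	 * may use at most 48 of the 64 tags in each word; the remaining 25% is
	 * reserved for synchronous requests per KYBER_ASYNC_PERCENT.
	 */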
	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
				       sizeof(struct kyber_ctx_queue),
				       GFP_KERNEL, hctx->numa_node);
	if (!khd->kcqs)
		goto err_khd;

	for (i = 0; i < hctx->nr_ctx; i++)
		kyber_ctx_queue_init(&khd->kcqs[i]);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
				      ilog2(8), GFP_KERNEL, hctx->numa_node,
				      false, false)) {
			while (--i >= 0)
				sbitmap_free(&khd->kcq_map[i]);
			goto err_kcqs;
		}
	}

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		khd->domain_wait[i].sbq = NULL;
		init_waitqueue_func_entry(&khd->domain_wait[i].wait,
					  kyber_domain_wake);
		khd->domain_wait[i].wait.private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	kyber_depth_updated(hctx);

	return 0;

err_kcqs:
	kfree(khd->kcqs);
err_khd:
	kfree(khd);
	return -ENOMEM;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_free(&khd->kcq_map[i]);
	kfree(khd->kcqs);
	kfree(hctx->sched_data);
}

static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = kyber_sched_domain(rq->cmd_flags);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(op)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
			    unsigned int nr_segs)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
	struct list_head *rq_list = &kcq->rq_list[sched_domain];
	bool merged;

	spin_lock(&kcq->lock);
	merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
	spin_unlock(&kcq->lock);

	return merged;
}

static void kyber_prepare_request(struct request *rq)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct list_head *rq_list, bool at_head)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq, *next;

	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
		struct list_head *head = &kcq->rq_list[sched_domain];

		spin_lock(&kcq->lock);
		trace_block_rq_insert(rq);
		if (at_head)
			list_move(&rq->queuelist, head);
		else
			list_move_tail(&rq->queuelist, head);
		sbitmap_set_bit(&khd->kcq_map[sched_domain],
				rq->mq_ctx->index_hw[hctx->type]);
		spin_unlock(&kcq->lock);
	}
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
			       unsigned int sched_domain, unsigned int type,
			       u64 target, u64 latency)
{
	unsigned int bucket;
	u64 divisor;

	if (latency > 0) {
		divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
			       KYBER_LATENCY_BUCKETS - 1);
	} else {
		bucket = 0;
	}

	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
}

static void kyber_completed_request(struct request *rq, u64 now)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
	struct kyber_cpu_latency *cpu_latency;
	unsigned int sched_domain;
	u64 target;

	sched_domain = kyber_sched_domain(rq->cmd_flags);
	if (sched_domain == KYBER_OTHER)
		return;

	cpu_latency = get_cpu_ptr(kqd->cpu_latency);
	target = kqd->latency_targets[sched_domain];
	add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
			   target, now - rq->start_time_ns);
	add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
			   now - rq->io_start_time_ns);
	put_cpu_ptr(kqd->cpu_latency);

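	/*
	 * Make sure the stats/adjustment timer fires within the next 100 ms
	 * (HZ / 10) of a completion; timer_reduce() only ever pulls the expiry
	 * earlier, so a pending sooner deadline is left alone.
	 */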
	timer_reduce(&kqd->timer, jiffies + HZ / 10);
}

struct flush_kcq_data {
	struct kyber_hctx_data *khd;
	unsigned int sched_domain;
	struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_kcq_data *flush_data = data;
	struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

	spin_lock(&kcq->lock);
	list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
			      flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&kcq->lock);

	return true;
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
				  unsigned int sched_domain,
				  struct list_head *list)
{
	struct flush_kcq_data data = {
		.khd = khd,
		.sched_domain = sched_domain,
		.list = list,
	};

	sbitmap_for_each_set(&khd->kcq_map[sched_domain],
			     flush_busy_kcq, &data);
}

static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
	struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);

	sbitmap_del_wait_queue(wait);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	struct sbq_wait *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		sbitmap_add_wait_queue(domain_tokens, ws, wait);

		/*
		 * Try again in case a token was freed before we got on the wait
		 * queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock, but
	 * list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		sbitmap_del_wait_queue(wait);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];

	/*
	 * If we already have a flushed request, then we just need to get a
	 * token for it. Otherwise, if there are pending requests in the kcqs,
	 * flush the kcqs, but only if we can get a token. If not, we should
	 * leave the requests in the kcqs so that they can be merged. Note that
	 * khd->lock serializes the flushes, so if we observed any bit set in
	 * the kcq_map, we will always get a request.
	 */
	rq = list_first_entry_or_null(rqs, struct request, queuelist);
	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
			rq = list_first_entry(rqs, struct request, queuelist);
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		} else {
			trace_kyber_throttled(kqd->q,
					      kyber_domain_names[khd->cur_domain]);
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]) ||
		    sbitmap_any_bit_set(&khd->kcq_map[i]))
			return true;
	}

	return false;
}

#define KYBER_LAT_SHOW_STORE(domain, name)				\
static ssize_t kyber_##name##_lat_show(struct elevator_queue *e,	\
					char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->latency_targets[domain]);	\
}									\
									\
static ssize_t kyber_##name##_lat_store(struct elevator_queue *e,	\
					const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->latency_targets[domain] = nsec;				\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(KYBER_READ, read);
KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start	= kyber_##name##_rqs_start,				\
	.next	= kyber_##name##_rqs_next,				\
	.stop	= kyber_##name##_rqs_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;	\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(write),
	KYBER_QUEUE_DOMAIN_ATTRS(discard),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(write),
	KYBER_HCTX_DOMAIN_ATTRS(discard),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.bio_merge = kyber_bio_merge,
		.prepare_request = kyber_prepare_request,
		.insert_requests = kyber_insert_requests,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
		.depth_updated = kyber_depth_updated,
	},
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_features = ELEVATOR_F_MQ_AWARE,
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");