// SPDX-License-Identifier: GPL-2.0
/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"
#include "blk-stat.h"
#include "blk-throttle.h"

/* Max dispatch from a group in 1 round */
#define THROTL_GRP_QUANTUM 8

/* Total max dispatch from all groups in one round */
#define THROTL_QUANTUM 32

/* Throttling is performed over a slice and after that slice is renewed */
#define DFL_THROTL_SLICE_HD (HZ / 10)
#define DFL_THROTL_SLICE_SSD (HZ / 50)
#define MAX_THROTL_SLICE (HZ)
#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
#define MIN_THROTL_BPS (320 * 1024)
#define MIN_THROTL_IOPS (10)
#define DFL_LATENCY_TARGET (-1L)
#define DFL_IDLE_THRESHOLD (0)
#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
#define LATENCY_FILTERED_SSD (0)
/*
 * For HD, very small latency comes from sequential IO. Such IO doesn't help
 * determine whether the cgroup's IO is impacted by other cgroups, hence we
 * ignore it.
 */
#define LATENCY_FILTERED_HD (1000L) /* 1ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;

enum tg_state_flags {
	THROTL_TG_PENDING	= 1 << 0,	/* on parent's pending tree */
	THROTL_TG_WAS_EMPTY	= 1 << 1,	/* bio_lists[] became non-empty */
};

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

/* We measure latency for request size from <= 4k to >= 1M */
#define LATENCY_BUCKET_SIZE 9

struct latency_bucket {
	unsigned long total_latency; /* ns / 1024 */
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency; /* ns / 1024 */
	bool valid;
};

struct throtl_data
{
	/* service tree for active throtl groups */
	struct throtl_service_queue service_queue;

	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	unsigned int throtl_slice;

	/* Work for dispatching throttled bios */
	struct work_struct dispatch_work;
	unsigned int limit_index;
	bool limit_valid[LIMIT_CNT];

	unsigned long low_upgrade_time;
	unsigned long low_downgrade_time;

	unsigned int scale;

	struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
	struct latency_bucket __percpu *latency_buckets[2];
	unsigned long last_calculate_time;
	unsigned long filtered_latency;

	bool track_bio_latency;
};

static void throtl_pending_timer_fn(struct timer_list *t);

static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
{
	return pd_to_blkg(&tg->pd);
}

/**
 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * Return the throtl_grp @sq belongs to. If @sq is the top-level one
 * embedded in throtl_data, %NULL is returned.
 */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
	if (sq && sq->parent_sq)
		return container_of(sq, struct throtl_grp, service_queue);
	else
		return NULL;
}

/**
 * sq_to_td - return throtl_data the specified service queue belongs to
 * @sq: the throtl_service_queue of interest
 *
 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 * Determine the associated throtl_data accordingly and return it.
 */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
	struct throtl_grp *tg = sq_to_tg(sq);

	if (tg)
		return tg->td;
	else
		return container_of(sq, struct throtl_data, service_queue);
}

/*
 * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scaling
 * makes the IO dispatch smoother.
 * Scale up: linearly scale up according to elapsed time since upgrade. For
 *           every throtl_slice, the limit scales up by 1/2 of the .low limit
 *           till the limit hits the .max limit.
 * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
 */
static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
{
	/* arbitrary value to avoid too big scale */
	if (td->scale < 4096 && time_after_eq(jiffies,
	    td->low_upgrade_time + td->scale * td->throtl_slice))
		td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;

	return low + (low >> 1) * td->scale;
}
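
/*
 * Worked example (illustrative, not part of the original source): with a
 * .low limit of 10 MiB/s, the adjusted limit is 10 + 5 * scale MiB/s, i.e.
 * 10, 15, 20, ... MiB/s as each throtl_slice elapses after an upgrade.
 * Callers such as tg_bps_limit() then clamp the result against the
 * configured .max limit.
 */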

static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	uint64_t ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return U64_MAX;

	td = tg->td;
	ret = tg->bps[rw][td->limit_index];
	if (ret == 0 && td->limit_index == LIMIT_LOW) {
		/* intermediate node or iops isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->iops[rw][td->limit_index])
			return U64_MAX;
		else
			return MIN_THROTL_BPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
	    tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
		ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
{
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td;
	unsigned int ret;

	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
		return UINT_MAX;

	td = tg->td;
	ret = tg->iops[rw][td->limit_index];
	if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
		/* intermediate node or bps isn't 0 */
		if (!list_empty(&blkg->blkcg->css.children) ||
		    tg->bps[rw][td->limit_index])
			return UINT_MAX;
		else
			return MIN_THROTL_IOPS;
	}

	if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
	    tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
		uint64_t adjusted;

		adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
		if (adjusted > UINT_MAX)
			adjusted = UINT_MAX;
		ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
	}
	return ret;
}

#define request_bucket_index(sectors) \
	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
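
/*
 * Worked example (illustrative): a 4k request is 8 sectors, so
 * order_base_2(8) - 3 = 0, the smallest bucket; a 1M request is 2048
 * sectors, so order_base_2(2048) - 3 = 8, the largest bucket. Everything
 * below 4k or above 1M is clamped into those edge buckets.
 */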

/**
 * throtl_log - log debug message via blktrace
 * @sq: the service_queue being reported
 * @fmt: printf format string
 * @args: printf args
 *
 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 * throtl_grp; otherwise, just "throtl".
 */
#define throtl_log(sq, fmt, args...)	do {				\
	struct throtl_grp *__tg = sq_to_tg((sq));			\
	struct throtl_data *__td = sq_to_td((sq));			\
									\
	(void)__td;							\
	if (likely(!blk_trace_note_message_enabled(__td->queue)))	\
		break;							\
	if ((__tg)) {							\
		blk_add_cgroup_trace_msg(__td->queue,			\
			tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
	} else {							\
		blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);	\
	}								\
} while (0)

static inline unsigned int throtl_bio_data_size(struct bio *bio)
{
	/* assume it's one sector */
	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
		return 512;
	return bio->bi_iter.bi_size;
}
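
/*
 * Illustrative note: a discard bio is charged a flat 512 bytes no matter
 * how large it is, so e.g. a 1 GiB discard counts as a single sector
 * against the bps budget, presumably because discards are far cheaper for
 * the device than same-sized writes.
 */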

static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
{
	INIT_LIST_HEAD(&qn->node);
	bio_list_init(&qn->bios);
	qn->tg = tg;
}

/**
 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 * @bio: bio being added
 * @qn: qnode to add bio to
 * @queued: the service_queue->queued[] list @qn belongs to
 *
 * Add @bio to @qn and put @qn on @queued if it's not already on.
 * @qn->tg's reference count is bumped when @qn is activated. See the
 * comment on top of throtl_qnode definition for details.
 */
static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
				 struct list_head *queued)
{
	bio_list_add(&qn->bios, bio);
	if (list_empty(&qn->node)) {
		list_add_tail(&qn->node, queued);
		blkg_get(tg_to_blkg(qn->tg));
	}
}

/**
 * throtl_peek_queued - peek the first bio on a qnode list
 * @queued: the qnode list to peek
 */
static struct bio *throtl_peek_queued(struct list_head *queued)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_peek(&qn->bios);
	WARN_ON_ONCE(!bio);
	return bio;
}

/**
 * throtl_pop_queued - pop the first bio from a qnode list
 * @queued: the qnode list to pop a bio from
 * @tg_to_put: optional out argument for throtl_grp to put
 *
 * Pop the first bio from the qnode list @queued. After popping, the first
 * qnode is removed from @queued if empty or moved to the end of @queued so
 * that the popping order is round-robin.
 *
 * When the first qnode is removed, its associated throtl_grp should be put
 * too. If @tg_to_put is NULL, this function automatically puts it;
 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 * responsible for putting it.
 */
static struct bio *throtl_pop_queued(struct list_head *queued,
				     struct throtl_grp **tg_to_put)
{
	struct throtl_qnode *qn;
	struct bio *bio;

	if (list_empty(queued))
		return NULL;

	qn = list_first_entry(queued, struct throtl_qnode, node);
	bio = bio_list_pop(&qn->bios);
	WARN_ON_ONCE(!bio);

	if (bio_list_empty(&qn->bios)) {
		list_del_init(&qn->node);
		if (tg_to_put)
			*tg_to_put = qn->tg;
		else
			blkg_put(tg_to_blkg(qn->tg));
	} else {
		list_move_tail(&qn->node, queued);
	}

	return bio;
}
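
/*
 * Worked example (illustrative): with qnode A holding bios a1, a2 and
 * qnode B holding b1, queued in that order, successive pops return
 * a1, b1, a2. After a1 is popped, A still has bios so it is rotated to
 * the tail behind B; after b1 is popped, B is empty and is removed.
 */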

/* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
	INIT_LIST_HEAD(&sq->queued[0]);
	INIT_LIST_HEAD(&sq->queued[1]);
	sq->pending_tree = RB_ROOT_CACHED;
	timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}

static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
						struct request_queue *q,
						struct blkcg *blkcg)
{
	struct throtl_grp *tg;
	int rw;

	tg = kzalloc_node(sizeof(*tg), gfp, q->node);
	if (!tg)
		return NULL;

	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
		goto err_free_tg;

	if (blkg_rwstat_init(&tg->stat_ios, gfp))
		goto err_exit_stat_bytes;

	throtl_service_queue_init(&tg->service_queue);

	for (rw = READ; rw <= WRITE; rw++) {
		throtl_qnode_init(&tg->qnode_on_self[rw], tg);
		throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
	}

	RB_CLEAR_NODE(&tg->rb_node);
	tg->bps[READ][LIMIT_MAX] = U64_MAX;
	tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
	tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
	/* LIMIT_LOW will have default value 0 */

	tg->latency_target = DFL_LATENCY_TARGET;
	tg->latency_target_conf = DFL_LATENCY_TARGET;
	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;

	return &tg->pd;

err_exit_stat_bytes:
	blkg_rwstat_exit(&tg->stat_bytes);
err_free_tg:
	kfree(tg);
	return NULL;
}

static void throtl_pd_init(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	struct blkcg_gq *blkg = tg_to_blkg(tg);
	struct throtl_data *td = blkg->q->td;
	struct throtl_service_queue *sq = &tg->service_queue;

	/*
	 * If on the default hierarchy, we switch to properly hierarchical
	 * behavior where limits on a given throtl_grp are applied to the
	 * whole subtree rather than just the group itself. e.g. If 16M
	 * read_bps limit is set on the root group, the whole system can't
	 * exceed 16M for the device.
	 *
	 * If not on the default hierarchy, the broken flat hierarchy
	 * behavior is retained where all throtl_grps are treated as if
	 * they're all separate root groups right below throtl_data.
	 * Limits of a group don't interact with limits of other groups
	 * regardless of the position of the group in the hierarchy.
	 */
	sq->parent_sq = &td->service_queue;
	if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
		sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
	tg->td = td;
}

/*
 * Set has_rules[] if @tg or any of its parents have limits configured.
 * This doesn't require walking up to the top of the hierarchy as the
 * parent's has_rules[] is guaranteed to be correct.
 */
static void tg_update_has_rules(struct throtl_grp *tg)
{
	struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
	struct throtl_data *td = tg->td;
	int rw;

	for (rw = READ; rw <= WRITE; rw++)
		tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
			(td->limit_valid[td->limit_index] &&
			 (tg_bps_limit(tg, rw) != U64_MAX ||
			  tg_iops_limit(tg, rw) != UINT_MAX));
}

static void throtl_pd_online(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);
	/*
	 * We don't want new groups to escape the limits of their ancestors.
	 * Update has_rules[] after a new group is brought online.
	 */
	tg_update_has_rules(tg);
}

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void blk_throtl_update_limit_valid(struct throtl_data *td)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	bool low_valid = false;

	rcu_read_lock();
	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
		struct throtl_grp *tg = blkg_to_tg(blkg);

		if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
		    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
			low_valid = true;
			break;
		}
	}
	rcu_read_unlock();

	td->limit_valid[LIMIT_LOW] = low_valid;
}
#else
static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
{
}
#endif

static void throtl_upgrade_state(struct throtl_data *td);
static void throtl_pd_offline(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	tg->bps[READ][LIMIT_LOW] = 0;
	tg->bps[WRITE][LIMIT_LOW] = 0;
	tg->iops[READ][LIMIT_LOW] = 0;
	tg->iops[WRITE][LIMIT_LOW] = 0;

	blk_throtl_update_limit_valid(tg->td);

	if (!tg->td->limit_valid[tg->td->limit_index])
		throtl_upgrade_state(tg->td);
}

static void throtl_pd_free(struct blkg_policy_data *pd)
{
	struct throtl_grp *tg = pd_to_tg(pd);

	del_timer_sync(&tg->service_queue.pending_timer);
	blkg_rwstat_exit(&tg->stat_bytes);
	blkg_rwstat_exit(&tg->stat_ios);
	kfree(tg);
}

static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
	struct rb_node *n;

	n = rb_first_cached(&parent_sq->pending_tree);
	WARN_ON_ONCE(!n);
	if (!n)
		return NULL;
	return rb_entry_tg(n);
}

static void throtl_rb_erase(struct rb_node *n,
			    struct throtl_service_queue *parent_sq)
{
	rb_erase_cached(n, &parent_sq->pending_tree);
	RB_CLEAR_NODE(n);
	--parent_sq->nr_pending;
}

static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(parent_sq);
	if (!tg)
		return;

	parent_sq->first_pending_disptime = tg->disptime;
}

static void tg_service_queue_add(struct throtl_grp *tg)
{
	struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
	struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	bool leftmost = true;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
			       leftmost);
}

static void throtl_enqueue_tg(struct throtl_grp *tg)
{
	if (!(tg->flags & THROTL_TG_PENDING)) {
		tg_service_queue_add(tg);
		tg->flags |= THROTL_TG_PENDING;
		tg->service_queue.parent_sq->nr_pending++;
	}
}

static void throtl_dequeue_tg(struct throtl_grp *tg)
{
	if (tg->flags & THROTL_TG_PENDING) {
		throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
		tg->flags &= ~THROTL_TG_PENDING;
	}
}

/* Call with queue lock held */
static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
					  unsigned long expires)
{
	unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;

	/*
	 * Since we are adjusting the throttle limit dynamically, the sleep
	 * time calculated according to previous limit might be invalid. It's
	 * possible the cgroup sleep time is very long and no other cgroups
	 * have IO running so notify the limit changes. Make sure the cgroup
	 * doesn't sleep too long to avoid the missed notification.
	 */
	if (time_after(expires, max_expire))
		expires = max_expire;
	mod_timer(&sq->pending_timer, expires);
	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
		   expires - jiffies, jiffies);
}

/**
 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 * @sq: the service_queue to schedule dispatch for
 * @force: force scheduling
 *
 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 * dispatch time of the first pending child. Returns %true if either timer
 * is armed or there's no pending child left. %false if the current
 * dispatch window is still open and the caller should continue
 * dispatching.
 *
 * If @force is %true, the dispatch timer is always scheduled and this
 * function is guaranteed to return %true. This is to be used when the
 * caller can't dispatch itself and needs to invoke pending_timer
 * unconditionally. Note that forced scheduling is likely to induce short
 * delay before dispatch starts even if @sq->first_pending_disptime is not
 * in the future and thus shouldn't be used in hot paths.
 */
static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
					  bool force)
{
	/* any pending children left? */
	if (!sq->nr_pending)
		return true;

	update_min_dispatch_time(sq);

	/* is the next dispatch time in the future? */
	if (force || time_after(sq->first_pending_disptime, jiffies)) {
		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
		return true;
	}

	/* tell the caller to continue dispatching */
	return false;
}

static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
					bool rw, unsigned long start)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;

	atomic_set(&tg->io_split_cnt[rw], 0);

	/*
	 * Previous slice has expired. We must have trimmed it after the last
	 * bio dispatch. That means that since the start of the last slice we
	 * never used that bandwidth. Try to make use of that bandwidth while
	 * giving credit.
	 */
	if (time_after_eq(start, tg->slice_start[rw]))
		tg->slice_start[rw] = start;

	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
	throtl_log(&tg->service_queue,
		   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + tg->td->throtl_slice;

	atomic_set(&tg->io_split_cnt[rw], 0);

	throtl_log(&tg->service_queue,
		   "[%c] new slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
					unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
}
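
/*
 * Worked example (illustrative): with HZ == 1000 and throtl_slice == 100
 * jiffies, a jiffy_end of 1234 is rounded up to 1300, so slice_end always
 * lands on a throtl_slice boundary.
 */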

static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
				       unsigned long jiffy_end)
{
	throtl_set_slice_end(tg, rw, jiffy_end);
	throtl_log(&tg->service_queue,
		   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', tg->slice_start[rw],
		   tg->slice_end[rw], jiffies);
}

/* Determine if previously allocated or extended slice is complete or not */
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return false;

	return true;
}

/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice has expired.
	 * A new slice will start when appropriate.
	 */
	if (throtl_slice_used(tg, rw))
		return;

	/*
	 * A bio has been dispatched, so also adjust slice_end. It might
	 * happen that the cgroup limit was initially very low, resulting in
	 * a high slice_end, but the limit was later bumped up and the bio
	 * was dispatched sooner; then we need to reduce slice_end. A stale,
	 * too-high slice_end is bad because it keeps a new slice from
	 * starting.
	 */

	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / tg->td->throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
		  HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;

	throtl_log(&tg->service_queue,
		   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
		   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
		   tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
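
/*
 * Worked example (illustrative): with HZ == 1000, throtl_slice == 100
 * jiffies and a bps limit of 1 MiB/s, if 250 jiffies have elapsed since
 * slice_start then nr_slices == 2 and bytes_trim == 1048576 * 100 * 2 /
 * 1000 == 209715: the budget of the two fully elapsed slices is forgiven
 * and slice_start advances by 200 jiffies.
 */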

static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
				  u32 iops_limit, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	if (iops_limit == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	jiffy_elapsed = jiffies - tg->slice_start[rw];

	/* Round up to the next throttle slice, wait time must be nonzero */
	jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value: the minimum iops is 1,
	 * so at most jiffy_elapsed_rnd is the equivalent of 1 second, as we
	 * will allow dispatch after 1 second and after that the slice should
	 * have been trimmed.
	 */

	tmp = (u64)iops_limit * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;

	if (wait)
		*wait = jiffy_wait;
	return false;
}
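
/*
 * Worked example (illustrative): iops_limit == 100, HZ == 1000 and
 * throtl_slice == 100. Thirty jiffies into the slice, jiffy_elapsed_rnd ==
 * roundup(31, 100) == 100 and io_allowed == 100 * 100 / 1000 == 10. If 10
 * bios have already been dispatched, the next one waits 100 - 30 == 70
 * jiffies.
 */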

static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
				 u64 bps_limit, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	unsigned int bio_size = throtl_bio_data_size(bio);

	if (bps_limit == U64_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = tg->td->throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);

	tmp = bps_limit * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return true;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return false;
}
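
/*
 * Worked example (illustrative): bps_limit == 1 MiB/s, HZ == 1000,
 * throtl_slice == 100 and jiffy_elapsed == 30, so jiffy_elapsed_rnd == 100
 * and bytes_allowed == 1048576 * 100 / 1000 == 104857. With bytes_disp ==
 * 100000 and a 16384-byte bio, extra_bytes == 11527, giving jiffy_wait ==
 * 11527 * 1000 / 1048576 == 10, plus the 70 jiffies of round-up, so the
 * bio waits about 80 jiffies.
 */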

/*
 * Returns whether one can dispatch a bio or not. Also returns the approx
 * number of jiffies to wait before this bio is within the IO rate and can
 * be dispatched.
 */
static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
			    unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
	u64 bps_limit = tg_bps_limit(tg, rw);
	u32 iops_limit = tg_iops_limit(tg, rw);

	/*
	 * Currently the whole state machine of the group depends on the
	 * first bio queued in the group's bio list. So one should not call
	 * this function with a different bio if there are other bios queued.
	 */
	BUG_ON(tg->service_queue.nr_queued[rw] &&
	       bio != throtl_peek_queued(&tg->service_queue.queued[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
		if (wait)
			*wait = 0;
		return true;
	}

	/*
	 * If the previous slice expired, start a new one, otherwise
	 * renew/extend the existing slice to make sure it is at least
	 * throtl_slice interval long since now. A new slice is started only
	 * for an empty throttle group. If there is a queued bio, there
	 * should be an active slice and it should be extended instead.
	 */
	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
		throtl_start_new_slice(tg, rw);
	else {
		if (time_before(tg->slice_end[rw],
		    jiffies + tg->td->throtl_slice))
			throtl_extend_slice(tg, rw,
				jiffies + tg->td->throtl_slice);
	}

	if (iops_limit != UINT_MAX)
		tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);

	if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
	    tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
		if (wait)
			*wait = 0;
		return true;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(tg, rw, jiffies + max_wait);

	return false;
}

static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	unsigned int bio_size = throtl_bio_data_size(bio);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio_size;
	tg->io_disp[rw]++;
	tg->last_bytes_disp[rw] += bio_size;
	tg->last_io_disp[rw]++;

	/*
	 * BIO_THROTTLED is used to prevent the same bio from being throttled
	 * more than once, as a throttled bio will go through blk-throtl a
	 * second time when it eventually gets issued. Set it when a bio is
	 * being charged to a tg.
	 */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

/**
 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 * @bio: bio to add
 * @qn: qnode to use
 * @tg: the target throtl_grp
 *
 * Add @bio to @tg's service_queue using @qn. If @qn is not specified,
 * tg->qnode_on_self[] is used.
 */
static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
			      struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	bool rw = bio_data_dir(bio);

	if (!qn)
		qn = &tg->qnode_on_self[rw];

	/*
	 * If @tg doesn't currently have any bios queued in the same
	 * direction, queueing @bio can change when @tg should be
	 * dispatched. Mark that @tg was empty. This is automatically
	 * cleared on the next tg_update_disptime().
	 */
	if (!sq->nr_queued[rw])
		tg->flags |= THROTL_TG_WAS_EMPTY;

	throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);

	sq->nr_queued[rw]++;
	throtl_enqueue_tg(tg);
}

static void tg_update_disptime(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	bio = throtl_peek_queued(&sq->queued[READ]);
	if (bio)
		tg_may_dispatch(tg, bio, &read_wait);

	bio = throtl_peek_queued(&sq->queued[WRITE]);
	if (bio)
		tg_may_dispatch(tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(tg);

	/* see throtl_add_bio_tg() */
	tg->flags &= ~THROTL_TG_WAS_EMPTY;
}

static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
					   struct throtl_grp *parent_tg, bool rw)
{
	if (throtl_slice_used(parent_tg, rw)) {
		throtl_start_new_slice_with_credit(parent_tg, rw,
				child_tg->slice_start[rw]);
	}
}

static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	struct throtl_service_queue *parent_sq = sq->parent_sq;
	struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
	struct throtl_grp *tg_to_put = NULL;
	struct bio *bio;

	/*
	 * @bio is being transferred from @tg to @parent_sq. Popping a bio
	 * from @tg may put its reference and @parent_sq might end up
	 * getting released prematurely. Remember the tg to put and put it
	 * after @bio is transferred to @parent_sq.
	 */
	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
	sq->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);

	/*
	 * If our parent is another tg, we just need to transfer @bio to
	 * the parent using throtl_add_bio_tg(). If our parent is
	 * @td->service_queue, @bio is ready to be issued. Put it on its
	 * bio_lists[] and decrease total number queued. The caller is
	 * responsible for issuing these bios.
	 */
	if (parent_tg) {
		throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
		start_parent_slice_with_credit(tg, parent_tg, rw);
	} else {
		throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
				     &parent_sq->queued[rw]);
		BUG_ON(tg->td->nr_queued[rw] <= 0);
		tg->td->nr_queued[rw]--;
	}

	throtl_trim_slice(tg, rw);

	if (tg_to_put)
		blkg_put(tg_to_blkg(tg_to_put));
}

static int throtl_dispatch_tg(struct throtl_grp *tg)
{
	struct throtl_service_queue *sq = &tg->service_queue;
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
	unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
	       tg_may_dispatch(tg, bio, NULL)) {

		tg_dispatch_one_bio(tg, bio_data_dir(bio));
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
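
/*
 * Worked example (illustrative): with THROTL_GRP_QUANTUM == 8,
 * max_nr_reads == 8 * 3 / 4 == 6 and max_nr_writes == 8 - 6 == 2, so each
 * group dispatches at most 6 reads and 2 writes per round, giving the
 * intended 75/25 split.
 */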

static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
{
	unsigned int nr_disp = 0;

	while (1) {
		struct throtl_grp *tg;
		struct throtl_service_queue *sq;

		if (!parent_sq->nr_pending)
			break;

		tg = throtl_rb_first(parent_sq);
		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(tg);

		nr_disp += throtl_dispatch_tg(tg);

		sq = &tg->service_queue;
		if (sq->nr_queued[0] || sq->nr_queued[1])
			tg_update_disptime(tg);

		if (nr_disp >= THROTL_QUANTUM)
			break;
	}

	return nr_disp;
}

static bool throtl_can_upgrade(struct throtl_data *td,
	struct throtl_grp *this_tg);
/**
 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
 * @t: the pending_timer member of the throtl_service_queue being serviced
 *
 * This timer is armed when a child throtl_grp with active bios becomes
 * pending and queued on the service_queue's pending_tree and expires when
 * the first child throtl_grp should be dispatched. This function
 * dispatches bios from the children throtl_grps to the parent
 * service_queue.
 *
 * If the parent's parent is another throtl_grp, dispatching is propagated
 * by either arming its pending_timer or repeating dispatch directly. If
 * the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
 */
static void throtl_pending_timer_fn(struct timer_list *t)
{
	struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
	struct throtl_grp *tg = sq_to_tg(sq);
	struct throtl_data *td = sq_to_td(sq);
	struct request_queue *q = td->queue;
	struct throtl_service_queue *parent_sq;
	bool dispatched;
	int ret;

	spin_lock_irq(&q->queue_lock);
	if (throtl_can_upgrade(td, NULL))
		throtl_upgrade_state(td);

again:
	parent_sq = sq->parent_sq;
	dispatched = false;

	while (true) {
		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
			   sq->nr_queued[READ] + sq->nr_queued[WRITE],
			   sq->nr_queued[READ], sq->nr_queued[WRITE]);

		ret = throtl_select_dispatch(sq);
		if (ret) {
			throtl_log(sq, "bios disp=%u", ret);
			dispatched = true;
		}

		if (throtl_schedule_next_dispatch(sq, false))
			break;

		/* this dispatch window is still open, relax and repeat */
		spin_unlock_irq(&q->queue_lock);
		cpu_relax();
		spin_lock_irq(&q->queue_lock);
	}

	if (!dispatched)
		goto out_unlock;

	if (parent_sq) {
		/* @parent_sq is another throtl_grp, propagate dispatch */
1175 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1176 tg_update_disptime(tg);
1177 if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1178 /* window is already open, repeat dispatching */
1179 sq = parent_sq;
1180 tg = sq_to_tg(sq);
1181 goto again;
1182 }
1183 }
1184 } else {
Baolin Wangb53b0722020-09-07 16:10:13 +08001185 /* reached the top-level, queue issuing */
Tejun Heo2e48a532013-05-14 13:52:38 -07001186 queue_work(kthrotld_workqueue, &td->dispatch_work);
1187 }
1188out_unlock:
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001189 spin_unlock_irq(&q->queue_lock);
Tejun Heo6e1a5702013-05-14 13:52:37 -07001190}
1191
1192/**
1193 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1194 * @work: work item being executed
1195 *
Baolin Wangb53b0722020-09-07 16:10:13 +08001196 * This function is queued for execution when bios reach the queued[] lists
1197 * of throtl_data->service_queue. Those bios are ready and issued by this
Tejun Heo6e1a5702013-05-14 13:52:37 -07001198 * function.
1199 */
Fabian Frederick8876e1402014-04-17 21:41:16 +02001200static void blk_throtl_dispatch_work_fn(struct work_struct *work)
Tejun Heo6e1a5702013-05-14 13:52:37 -07001201{
1202 struct throtl_data *td = container_of(work, struct throtl_data,
1203 dispatch_work);
1204 struct throtl_service_queue *td_sq = &td->service_queue;
1205 struct request_queue *q = td->queue;
1206 struct bio_list bio_list_on_stack;
1207 struct bio *bio;
1208 struct blk_plug plug;
1209 int rw;
1210
1211 bio_list_init(&bio_list_on_stack);
1212
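	/*
	 * Splice everything queued at the top level onto an on-stack list
	 * under the queue lock, then issue the bios outside the lock under
	 * a plug so the driver sees them as one batch.
	 */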
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001213 spin_lock_irq(&q->queue_lock);
Tejun Heoc5cc2072013-05-14 13:52:38 -07001214 for (rw = READ; rw <= WRITE; rw++)
1215 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1216 bio_list_add(&bio_list_on_stack, bio);
Christoph Hellwig0d945c12018-11-15 12:17:28 -07001217 spin_unlock_irq(&q->queue_lock);
Vivek Goyale43473b2010-09-15 17:06:35 -04001218
Tejun Heo6e1a5702013-05-14 13:52:37 -07001219 if (!bio_list_empty(&bio_list_on_stack)) {
Vivek Goyal69d60eb2011-03-09 08:27:37 +01001220 blk_start_plug(&plug);
Christoph Hellwiged00aab2020-07-01 10:59:44 +02001221 while ((bio = bio_list_pop(&bio_list_on_stack)))
1222 submit_bio_noacct(bio);
Vivek Goyal69d60eb2011-03-09 08:27:37 +01001223 blk_finish_plug(&plug);
Vivek Goyale43473b2010-09-15 17:06:35 -04001224 }
Vivek Goyale43473b2010-09-15 17:06:35 -04001225}
1226
Tejun Heof95a04a2012-04-16 13:57:26 -07001227static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1228 int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001229{
Tejun Heof95a04a2012-04-16 13:57:26 -07001230 struct throtl_grp *tg = pd_to_tg(pd);
1231 u64 v = *(u64 *)((void *)tg + off);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001232
Shaohua Li2ab54922017-03-27 10:51:29 -07001233 if (v == U64_MAX)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001234 return 0;
Tejun Heof95a04a2012-04-16 13:57:26 -07001235 return __blkg_prfill_u64(sf, pd, v);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001236}
1237
Tejun Heof95a04a2012-04-16 13:57:26 -07001238static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1239 int off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001240{
Tejun Heof95a04a2012-04-16 13:57:26 -07001241 struct throtl_grp *tg = pd_to_tg(pd);
1242 unsigned int v = *(unsigned int *)((void *)tg + off);
Tejun Heoaf133ce2012-04-01 14:38:44 -07001243
Shaohua Li2ab54922017-03-27 10:51:29 -07001244 if (v == UINT_MAX)
Tejun Heoaf133ce2012-04-01 14:38:44 -07001245 return 0;
Tejun Heof95a04a2012-04-16 13:57:26 -07001246 return __blkg_prfill_u64(sf, pd, v);
Tejun Heoaf133ce2012-04-01 14:38:44 -07001247}
1248
Tejun Heo2da8ca82013-12-05 12:28:04 -05001249static int tg_print_conf_u64(struct seq_file *sf, void *v)
Tejun Heoaf133ce2012-04-01 14:38:44 -07001250{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001251 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1252 &blkcg_policy_throtl, seq_cft(sf)->private, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001253 return 0;
1254}
1255
Tejun Heo2da8ca82013-12-05 12:28:04 -05001256static int tg_print_conf_uint(struct seq_file *sf, void *v)
Vivek Goyale43473b2010-09-15 17:06:35 -04001257{
Tejun Heo2da8ca82013-12-05 12:28:04 -05001258 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1259 &blkcg_policy_throtl, seq_cft(sf)->private, false);
Tejun Heoaf133ce2012-04-01 14:38:44 -07001260 return 0;
Vivek Goyale43473b2010-09-15 17:06:35 -04001261}
1262
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001263static void tg_conf_updated(struct throtl_grp *tg, bool global)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001264{
Tejun Heo69948b02015-08-18 14:55:32 -07001265 struct throtl_service_queue *sq = &tg->service_queue;
Tejun Heo492eb212013-08-08 20:11:25 -04001266 struct cgroup_subsys_state *pos_css;
Tejun Heo69948b02015-08-18 14:55:32 -07001267 struct blkcg_gq *blkg;
Tejun Heoaf133ce2012-04-01 14:38:44 -07001268
Tejun Heofda6f272013-05-14 13:52:36 -07001269 throtl_log(&tg->service_queue,
1270 "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
Shaohua Li9f626e32017-03-27 10:51:30 -07001271 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1272 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
Tejun Heo632b4492013-05-14 13:52:31 -07001273
1274 /*
Tejun Heo693e7512013-05-14 13:52:38 -07001275 * Update has_rules[] flags for the updated tg's subtree. A tg is
1276 * considered to have rules if either the tg itself or any of its
1277 * ancestors has rules. This identifies groups without any
1278 * restrictions in the whole hierarchy and allows them to bypass
1279 * blk-throttle.
1280 */
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001281 blkg_for_each_descendant_pre(blkg, pos_css,
1282 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
Shaohua Li5b81fc32017-05-17 13:07:24 -07001283 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1284 struct throtl_grp *parent_tg;
1285
1286 tg_update_has_rules(this_tg);
1287 /* ignore root/second level */
1288 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1289 !blkg->parent->parent)
1290 continue;
1291 parent_tg = blkg_to_tg(blkg->parent);
1292 /*
1293 * make sure all children have a lower idle time threshold and
1294 * a higher latency target
1295 */
1296 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1297 parent_tg->idletime_threshold);
1298 this_tg->latency_target = max(this_tg->latency_target,
1299 parent_tg->latency_target);
1300 }
Tejun Heo693e7512013-05-14 13:52:38 -07001301
1302 /*
Tejun Heo632b4492013-05-14 13:52:31 -07001303 * We're already holding queue_lock and know @tg is valid. Let's
1304 * apply the new config directly.
1305 *
1306 * Restart the slices for both READ and WRITE. It might happen
1307 * that a group's limits are dropped suddenly and we don't want to
1308 * account recently dispatched IO at the new low rate.
1309 */
Baolin Wangff8b22c2020-09-07 16:10:14 +08001310 throtl_start_new_slice(tg, READ);
1311 throtl_start_new_slice(tg, WRITE);
Tejun Heo632b4492013-05-14 13:52:31 -07001312
Tejun Heo5b2c16a2013-05-14 13:52:32 -07001313 if (tg->flags & THROTL_TG_PENDING) {
Tejun Heo77216b02013-05-14 13:52:36 -07001314 tg_update_disptime(tg);
Tejun Heo7f52f982013-05-14 13:52:37 -07001315 throtl_schedule_next_dispatch(sq->parent_sq, true);
Tejun Heo632b4492013-05-14 13:52:31 -07001316 }
Tejun Heo69948b02015-08-18 14:55:32 -07001317}
Tejun Heo60c2bc22012-04-01 14:38:43 -07001318
Tejun Heo69948b02015-08-18 14:55:32 -07001319static ssize_t tg_set_conf(struct kernfs_open_file *of,
1320 char *buf, size_t nbytes, loff_t off, bool is_u64)
1321{
1322 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1323 struct blkg_conf_ctx ctx;
1324 struct throtl_grp *tg;
1325 int ret;
1326 u64 v;
1327
1328 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1329 if (ret)
1330 return ret;
1331
1332 ret = -EINVAL;
1333 if (sscanf(ctx.body, "%llu", &v) != 1)
1334 goto out_finish;
1335 if (!v)
Shaohua Li2ab54922017-03-27 10:51:29 -07001336 v = U64_MAX;
Tejun Heo69948b02015-08-18 14:55:32 -07001337
1338 tg = blkg_to_tg(ctx.blkg);
1339
1340 if (is_u64)
1341 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1342 else
1343 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1344
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001345 tg_conf_updated(tg, false);
Tejun Heo36aa9e52015-08-18 14:55:31 -07001346 ret = 0;
1347out_finish:
Tejun Heo60c2bc22012-04-01 14:38:43 -07001348 blkg_conf_finish(&ctx);
Tejun Heo36aa9e52015-08-18 14:55:31 -07001349 return ret ?: nbytes;
Tejun Heo60c2bc22012-04-01 14:38:43 -07001350}
1351
Tejun Heo451af502014-05-13 12:16:21 -04001352static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1353 char *buf, size_t nbytes, loff_t off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001354{
Tejun Heo451af502014-05-13 12:16:21 -04001355 return tg_set_conf(of, buf, nbytes, off, true);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001356}
1357
Tejun Heo451af502014-05-13 12:16:21 -04001358static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1359 char *buf, size_t nbytes, loff_t off)
Tejun Heo60c2bc22012-04-01 14:38:43 -07001360{
Tejun Heo451af502014-05-13 12:16:21 -04001361 return tg_set_conf(of, buf, nbytes, off, false);
Tejun Heo60c2bc22012-04-01 14:38:43 -07001362}
1363
Tejun Heo7ca46432019-11-07 11:18:01 -08001364static int tg_print_rwstat(struct seq_file *sf, void *v)
1365{
1366 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1367 blkg_prfill_rwstat, &blkcg_policy_throtl,
1368 seq_cft(sf)->private, true);
1369 return 0;
1370}
1371
1372static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1373 struct blkg_policy_data *pd, int off)
1374{
1375 struct blkg_rwstat_sample sum;
1376
1377 blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1378 &sum);
1379 return __blkg_prfill_rwstat(sf, pd, &sum);
1380}
1381
1382static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1383{
1384 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1385 tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1386 seq_cft(sf)->private, true);
1387 return 0;
1388}
1389
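/*
 * Usage sketch for the legacy (cgroup v1) interface: to cap reads on
 * device 8:16 at 1 MiB/s,
 *	echo "8:16 1048576" > blkio.throttle.read_bps_device
 * Writing a value of 0 clears the limit (it is stored as U64_MAX above).
 */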
Tejun Heo880f50e2015-08-18 14:55:30 -07001390static struct cftype throtl_legacy_files[] = {
Tejun Heo60c2bc22012-04-01 14:38:43 -07001391 {
1392 .name = "throttle.read_bps_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001393 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001394 .seq_show = tg_print_conf_u64,
Tejun Heo451af502014-05-13 12:16:21 -04001395 .write = tg_set_conf_u64,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001396 },
1397 {
1398 .name = "throttle.write_bps_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001399 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001400 .seq_show = tg_print_conf_u64,
Tejun Heo451af502014-05-13 12:16:21 -04001401 .write = tg_set_conf_u64,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001402 },
1403 {
1404 .name = "throttle.read_iops_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001405 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001406 .seq_show = tg_print_conf_uint,
Tejun Heo451af502014-05-13 12:16:21 -04001407 .write = tg_set_conf_uint,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001408 },
1409 {
1410 .name = "throttle.write_iops_device",
Shaohua Li9f626e32017-03-27 10:51:30 -07001411 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
Tejun Heo2da8ca82013-12-05 12:28:04 -05001412 .seq_show = tg_print_conf_uint,
Tejun Heo451af502014-05-13 12:16:21 -04001413 .write = tg_set_conf_uint,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001414 },
1415 {
1416 .name = "throttle.io_service_bytes",
Tejun Heo7ca46432019-11-07 11:18:01 -08001417 .private = offsetof(struct throtl_grp, stat_bytes),
1418 .seq_show = tg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001419 },
1420 {
weiping zhang17534c62017-12-11 22:56:25 +08001421 .name = "throttle.io_service_bytes_recursive",
Tejun Heo7ca46432019-11-07 11:18:01 -08001422 .private = offsetof(struct throtl_grp, stat_bytes),
1423 .seq_show = tg_print_rwstat_recursive,
weiping zhang17534c62017-12-11 22:56:25 +08001424 },
1425 {
Tejun Heo60c2bc22012-04-01 14:38:43 -07001426 .name = "throttle.io_serviced",
Tejun Heo7ca46432019-11-07 11:18:01 -08001427 .private = offsetof(struct throtl_grp, stat_ios),
1428 .seq_show = tg_print_rwstat,
Tejun Heo60c2bc22012-04-01 14:38:43 -07001429 },
weiping zhang17534c62017-12-11 22:56:25 +08001430 {
1431 .name = "throttle.io_serviced_recursive",
Tejun Heo7ca46432019-11-07 11:18:01 -08001432 .private = offsetof(struct throtl_grp, stat_ios),
1433 .seq_show = tg_print_rwstat_recursive,
weiping zhang17534c62017-12-11 22:56:25 +08001434 },
Tejun Heo60c2bc22012-04-01 14:38:43 -07001435 { } /* terminate */
1436};
1437
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001438static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001439 int off)
1440{
1441 struct throtl_grp *tg = pd_to_tg(pd);
1442 const char *dname = blkg_dev_name(pd->blkg);
1443 char bufs[4][21] = { "max", "max", "max", "max" };
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001444 u64 bps_dft;
1445 unsigned int iops_dft;
Shaohua Liada75b62017-03-27 10:51:42 -07001446 char idle_time[26] = "";
Shaohua Liec809912017-03-27 10:51:44 -07001447 char latency_time[26] = "";
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001448
1449 if (!dname)
1450 return 0;
Shaohua Li9f626e32017-03-27 10:51:30 -07001451
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001452 if (off == LIMIT_LOW) {
1453 bps_dft = 0;
1454 iops_dft = 0;
1455 } else {
1456 bps_dft = U64_MAX;
1457 iops_dft = UINT_MAX;
1458 }
1459
1460 if (tg->bps_conf[READ][off] == bps_dft &&
1461 tg->bps_conf[WRITE][off] == bps_dft &&
1462 tg->iops_conf[READ][off] == iops_dft &&
Shaohua Liada75b62017-03-27 10:51:42 -07001463 tg->iops_conf[WRITE][off] == iops_dft &&
Shaohua Liec809912017-03-27 10:51:44 -07001464 (off != LIMIT_LOW ||
Shaohua Lib4f428e2017-05-17 13:07:27 -07001465 (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
Shaohua Li5b81fc32017-05-17 13:07:24 -07001466 tg->latency_target_conf == DFL_LATENCY_TARGET)))
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001467 return 0;
1468
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001469 if (tg->bps_conf[READ][off] != U64_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001470 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001471 tg->bps_conf[READ][off]);
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001472 if (tg->bps_conf[WRITE][off] != U64_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001473 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001474 tg->bps_conf[WRITE][off]);
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001475 if (tg->iops_conf[READ][off] != UINT_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001476 snprintf(bufs[2], sizeof(bufs[2]), "%u",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001477 tg->iops_conf[READ][off]);
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001478 if (tg->iops_conf[WRITE][off] != UINT_MAX)
Shaohua Li9f626e32017-03-27 10:51:30 -07001479 snprintf(bufs[3], sizeof(bufs[3]), "%u",
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001480 tg->iops_conf[WRITE][off]);
Shaohua Liada75b62017-03-27 10:51:42 -07001481 if (off == LIMIT_LOW) {
Shaohua Li5b81fc32017-05-17 13:07:24 -07001482 if (tg->idletime_threshold_conf == ULONG_MAX)
Shaohua Liada75b62017-03-27 10:51:42 -07001483 strcpy(idle_time, " idle=max");
1484 else
1485 snprintf(idle_time, sizeof(idle_time), " idle=%lu",
Shaohua Li5b81fc32017-05-17 13:07:24 -07001486 tg->idletime_threshold_conf);
Shaohua Liec809912017-03-27 10:51:44 -07001487
Shaohua Li5b81fc32017-05-17 13:07:24 -07001488 if (tg->latency_target_conf == ULONG_MAX)
Shaohua Liec809912017-03-27 10:51:44 -07001489 strcpy(latency_time, " latency=max");
1490 else
1491 snprintf(latency_time, sizeof(latency_time),
Shaohua Li5b81fc32017-05-17 13:07:24 -07001492 " latency=%lu", tg->latency_target_conf);
Shaohua Liada75b62017-03-27 10:51:42 -07001493 }
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001494
Shaohua Liec809912017-03-27 10:51:44 -07001495 seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1496 dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1497 latency_time);
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001498 return 0;
1499}
1500
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001501static int tg_print_limit(struct seq_file *sf, void *v)
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001502{
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001503 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001504 &blkcg_policy_throtl, seq_cft(sf)->private, false);
1505 return 0;
1506}
1507
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001508static ssize_t tg_set_limit(struct kernfs_open_file *of,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001509 char *buf, size_t nbytes, loff_t off)
1510{
1511 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1512 struct blkg_conf_ctx ctx;
1513 struct throtl_grp *tg;
1514 u64 v[4];
Shaohua Liada75b62017-03-27 10:51:42 -07001515 unsigned long idle_time;
Shaohua Liec809912017-03-27 10:51:44 -07001516 unsigned long latency_time;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001517 int ret;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001518 int index = of_cft(of)->private;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001519
1520 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1521 if (ret)
1522 return ret;
1523
1524 tg = blkg_to_tg(ctx.blkg);
1525
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001526 v[0] = tg->bps_conf[READ][index];
1527 v[1] = tg->bps_conf[WRITE][index];
1528 v[2] = tg->iops_conf[READ][index];
1529 v[3] = tg->iops_conf[WRITE][index];
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001530
Shaohua Li5b81fc32017-05-17 13:07:24 -07001531 idle_time = tg->idletime_threshold_conf;
1532 latency_time = tg->latency_target_conf;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001533 while (true) {
1534 char tok[27]; /* wiops=18446744073709551615 */
1535 char *p;
Shaohua Li2ab54922017-03-27 10:51:29 -07001536 u64 val = U64_MAX;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001537 int len;
1538
1539 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1540 break;
1541 if (tok[0] == '\0')
1542 break;
1543 ctx.body += len;
1544
1545 ret = -EINVAL;
1546 p = tok;
1547 strsep(&p, "=");
1548 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1549 goto out_finish;
1550
1551 ret = -ERANGE;
1552 if (!val)
1553 goto out_finish;
1554
1555 ret = -EINVAL;
Baolin Wang5b7048b2020-10-08 11:52:25 +08001556 if (!strcmp(tok, "rbps") && val > 1)
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001557 v[0] = val;
Baolin Wang5b7048b2020-10-08 11:52:25 +08001558 else if (!strcmp(tok, "wbps") && val > 1)
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001559 v[1] = val;
Baolin Wang5b7048b2020-10-08 11:52:25 +08001560 else if (!strcmp(tok, "riops") && val > 1)
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001561 v[2] = min_t(u64, val, UINT_MAX);
Baolin Wang5b7048b2020-10-08 11:52:25 +08001562 else if (!strcmp(tok, "wiops") && val > 1)
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001563 v[3] = min_t(u64, val, UINT_MAX);
Shaohua Liada75b62017-03-27 10:51:42 -07001564 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1565 idle_time = val;
Shaohua Liec809912017-03-27 10:51:44 -07001566 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1567 latency_time = val;
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001568 else
1569 goto out_finish;
1570 }
1571
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001572 tg->bps_conf[READ][index] = v[0];
1573 tg->bps_conf[WRITE][index] = v[1];
1574 tg->iops_conf[READ][index] = v[2];
1575 tg->iops_conf[WRITE][index] = v[3];
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001576
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001577 if (index == LIMIT_MAX) {
1578 tg->bps[READ][index] = v[0];
1579 tg->bps[WRITE][index] = v[1];
1580 tg->iops[READ][index] = v[2];
1581 tg->iops[WRITE][index] = v[3];
1582 }
1583 tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1584 tg->bps_conf[READ][LIMIT_MAX]);
1585 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1586 tg->bps_conf[WRITE][LIMIT_MAX]);
1587 tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1588 tg->iops_conf[READ][LIMIT_MAX]);
1589 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1590 tg->iops_conf[WRITE][LIMIT_MAX]);
Shaohua Lib4f428e2017-05-17 13:07:27 -07001591 tg->idletime_threshold_conf = idle_time;
1592 tg->latency_target_conf = latency_time;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001593
Shaohua Lib4f428e2017-05-17 13:07:27 -07001594 /* force user to configure all settings for low limit */
1595 if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1596 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1597 tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1598 tg->latency_target_conf == DFL_LATENCY_TARGET) {
1599 tg->bps[READ][LIMIT_LOW] = 0;
1600 tg->bps[WRITE][LIMIT_LOW] = 0;
1601 tg->iops[READ][LIMIT_LOW] = 0;
1602 tg->iops[WRITE][LIMIT_LOW] = 0;
1603 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1604 tg->latency_target = DFL_LATENCY_TARGET;
1605 } else if (index == LIMIT_LOW) {
Shaohua Li5b81fc32017-05-17 13:07:24 -07001606 tg->idletime_threshold = tg->idletime_threshold_conf;
Shaohua Li5b81fc32017-05-17 13:07:24 -07001607 tg->latency_target = tg->latency_target_conf;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001608 }
Shaohua Lib4f428e2017-05-17 13:07:27 -07001609
1610 blk_throtl_update_limit_valid(tg->td);
1611 if (tg->td->limit_valid[LIMIT_LOW]) {
1612 if (index == LIMIT_LOW)
1613 tg->td->limit_index = LIMIT_LOW;
1614 } else
1615 tg->td->limit_index = LIMIT_MAX;
Shaohua Li9bb67ae2017-05-17 13:07:26 -07001616 tg_conf_updated(tg, index == LIMIT_LOW &&
1617 tg->td->limit_valid[LIMIT_LOW]);
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001618 ret = 0;
1619out_finish:
1620 blkg_conf_finish(&ctx);
1621 return ret ?: nbytes;
1622}
1623
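/*
 * Usage sketch for the cgroup v2 interface: to cap device 8:16 at 2 MiB/s
 * for reads and 120 IOPS for writes,
 *	echo "8:16 rbps=2097152 wiops=120" > io.max
 * "max" clears a key; io.low additionally accepts idle= and latency= when
 * CONFIG_BLK_DEV_THROTTLING_LOW is enabled.
 */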
1624static struct cftype throtl_files[] = {
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001625#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1626 {
1627 .name = "low",
1628 .flags = CFTYPE_NOT_ON_ROOT,
1629 .seq_show = tg_print_limit,
1630 .write = tg_set_limit,
1631 .private = LIMIT_LOW,
1632 },
1633#endif
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001634 {
1635 .name = "max",
1636 .flags = CFTYPE_NOT_ON_ROOT,
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001637 .seq_show = tg_print_limit,
1638 .write = tg_set_limit,
1639 .private = LIMIT_MAX,
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001640 },
1641 { } /* terminate */
1642};
1643
Vivek Goyalda527772011-03-02 19:05:33 -05001644static void throtl_shutdown_wq(struct request_queue *q)
Vivek Goyale43473b2010-09-15 17:06:35 -04001645{
1646 struct throtl_data *td = q->td;
1647
Tejun Heo69df0ab2013-05-14 13:52:36 -07001648 cancel_work_sync(&td->dispatch_work);
Vivek Goyale43473b2010-09-15 17:06:35 -04001649}
1650
Jens Axboea7b36ee2021-10-05 09:11:56 -06001651struct blkcg_policy blkcg_policy_throtl = {
Tejun Heo2ee867dc2015-08-18 14:55:34 -07001652 .dfl_cftypes = throtl_files,
Tejun Heo880f50e2015-08-18 14:55:30 -07001653 .legacy_cftypes = throtl_legacy_files,
Tejun Heof9fcc2d2012-04-16 13:57:27 -07001654
Tejun Heo001bea72015-08-18 14:55:11 -07001655 .pd_alloc_fn = throtl_pd_alloc,
Tejun Heof9fcc2d2012-04-16 13:57:27 -07001656 .pd_init_fn = throtl_pd_init,
Tejun Heo693e7512013-05-14 13:52:38 -07001657 .pd_online_fn = throtl_pd_online,
Shaohua Licd5ab1b2017-03-27 10:51:32 -07001658 .pd_offline_fn = throtl_pd_offline,
Tejun Heo001bea72015-08-18 14:55:11 -07001659 .pd_free_fn = throtl_pd_free,
Vivek Goyale43473b2010-09-15 17:06:35 -04001660};
1661
Shaohua Li3f0abd82017-03-27 10:51:35 -07001662static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1663{
1664 unsigned long rtime = jiffies, wtime = jiffies;
1665
1666 if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1667 rtime = tg->last_low_overflow_time[READ];
1668 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1669 wtime = tg->last_low_overflow_time[WRITE];
1670 return min(rtime, wtime);
1671}
1672
1673/* tg should not be an intermediate node */
1674static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1675{
1676 struct throtl_service_queue *parent_sq;
1677 struct throtl_grp *parent = tg;
1678 unsigned long ret = __tg_last_low_overflow_time(tg);
1679
1680 while (true) {
1681 parent_sq = parent->service_queue.parent_sq;
1682 parent = sq_to_tg(parent_sq);
1683 if (!parent)
1684 break;
1685
1686 /*
1687 * If the parent doesn't have a low limit it always reaches the low
1688 * limit, so its overflow time is useless for its children
1689 */
1690 if (!parent->bps[READ][LIMIT_LOW] &&
1691 !parent->iops[READ][LIMIT_LOW] &&
1692 !parent->bps[WRITE][LIMIT_LOW] &&
1693 !parent->iops[WRITE][LIMIT_LOW])
1694 continue;
1695 if (time_after(__tg_last_low_overflow_time(parent), ret))
1696 ret = __tg_last_low_overflow_time(parent);
1697 }
1698 return ret;
1699}
1700
Shaohua Li9e234ee2017-03-27 10:51:41 -07001701static bool throtl_tg_is_idle(struct throtl_grp *tg)
1702{
1703 /*
1704 * cgroup is idle if:
1705 * - a single idle period is too long: longer than a fixed cap (in case
Shaohua Lib4f428e2017-05-17 13:07:27 -07001706 * the user configures too big a threshold) or 4 times the idletime threshold
Shaohua Li9e234ee2017-03-27 10:51:41 -07001707 * - the average think time is bigger than the threshold
Shaohua Li53696b82017-03-27 15:19:43 -07001708 * - IO latency is mostly below the target (fewer than 1/5 of bios are bad)
Shaohua Li9e234ee2017-03-27 10:51:41 -07001709 */
Shaohua Lib4f428e2017-05-17 13:07:27 -07001710 unsigned long time;
Shaohua Li4cff7292017-05-17 13:07:25 -07001711 bool ret;
Shaohua Li9e234ee2017-03-27 10:51:41 -07001712
Shaohua Lib4f428e2017-05-17 13:07:27 -07001713 time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1714 ret = tg->latency_target == DFL_LATENCY_TARGET ||
1715 tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1716 (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1717 tg->avg_idletime > tg->idletime_threshold ||
1718 (tg->latency_target && tg->bio_cnt &&
Shaohua Li53696b82017-03-27 15:19:43 -07001719 tg->bad_bio_cnt * 5 < tg->bio_cnt);
Shaohua Li4cff7292017-05-17 13:07:25 -07001720 throtl_log(&tg->service_queue,
1721 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1722 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1723 tg->bio_cnt, ret, tg->td->scale);
1724 return ret;
Shaohua Li9e234ee2017-03-27 10:51:41 -07001725}
1726
Shaohua Lic79892c2017-03-27 10:51:34 -07001727static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1728{
1729 struct throtl_service_queue *sq = &tg->service_queue;
1730 bool read_limit, write_limit;
1731
1732 /*
1733 * if the cgroup reaches its low limit (a low limit of 0 is always
1734 * considered reached), it's ok to upgrade to the next limit
1735 */
1736 read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1737 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1738 if (!read_limit && !write_limit)
1739 return true;
1740 if (read_limit && sq->nr_queued[READ] &&
1741 (!write_limit || sq->nr_queued[WRITE]))
1742 return true;
1743 if (write_limit && sq->nr_queued[WRITE] &&
1744 (!read_limit || sq->nr_queued[READ]))
1745 return true;
Shaohua Liaec24242017-03-27 10:51:39 -07001746
1747 if (time_after_eq(jiffies,
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07001748 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1749 throtl_tg_is_idle(tg))
Shaohua Liaec24242017-03-27 10:51:39 -07001750 return true;
Shaohua Lic79892c2017-03-27 10:51:34 -07001751 return false;
1752}
1753
1754static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1755{
1756 while (true) {
1757 if (throtl_tg_can_upgrade(tg))
1758 return true;
1759 tg = sq_to_tg(tg->service_queue.parent_sq);
1760 if (!tg || !tg_to_blkg(tg)->parent)
1761 return false;
1762 }
1763 return false;
1764}
1765
1766static bool throtl_can_upgrade(struct throtl_data *td,
1767 struct throtl_grp *this_tg)
1768{
1769 struct cgroup_subsys_state *pos_css;
1770 struct blkcg_gq *blkg;
1771
1772 if (td->limit_index != LIMIT_LOW)
1773 return false;
1774
Shaohua Li297e3d82017-03-27 10:51:37 -07001775 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001776 return false;
1777
Shaohua Lic79892c2017-03-27 10:51:34 -07001778 rcu_read_lock();
1779 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1780 struct throtl_grp *tg = blkg_to_tg(blkg);
1781
1782 if (tg == this_tg)
1783 continue;
1784 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1785 continue;
1786 if (!throtl_hierarchy_can_upgrade(tg)) {
1787 rcu_read_unlock();
1788 return false;
1789 }
1790 }
1791 rcu_read_unlock();
1792 return true;
1793}
1794
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07001795static void throtl_upgrade_check(struct throtl_grp *tg)
1796{
1797 unsigned long now = jiffies;
1798
1799 if (tg->td->limit_index != LIMIT_LOW)
1800 return;
1801
1802 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1803 return;
1804
1805 tg->last_check_time = now;
1806
1807 if (!time_after_eq(now,
1808 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1809 return;
1810
1811 if (throtl_can_upgrade(tg->td, NULL))
1812 throtl_upgrade_state(tg->td);
1813}
1814
Shaohua Lic79892c2017-03-27 10:51:34 -07001815static void throtl_upgrade_state(struct throtl_data *td)
1816{
1817 struct cgroup_subsys_state *pos_css;
1818 struct blkcg_gq *blkg;
1819
Shaohua Li4cff7292017-05-17 13:07:25 -07001820 throtl_log(&td->service_queue, "upgrade to max");
Shaohua Lic79892c2017-03-27 10:51:34 -07001821 td->limit_index = LIMIT_MAX;
Shaohua Li3f0abd82017-03-27 10:51:35 -07001822 td->low_upgrade_time = jiffies;
Shaohua Li7394e312017-03-27 10:51:40 -07001823 td->scale = 0;
Shaohua Lic79892c2017-03-27 10:51:34 -07001824 rcu_read_lock();
1825 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1826 struct throtl_grp *tg = blkg_to_tg(blkg);
1827 struct throtl_service_queue *sq = &tg->service_queue;
1828
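		/*
		 * Force each group's dispatch time into the past so all
		 * queued bios are dispatched immediately under the
		 * restored max limits.
		 */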
1829 tg->disptime = jiffies - 1;
1830 throtl_select_dispatch(sq);
Joseph Qi4f02fb72017-09-30 14:38:49 +08001831 throtl_schedule_next_dispatch(sq, true);
Shaohua Lic79892c2017-03-27 10:51:34 -07001832 }
1833 rcu_read_unlock();
1834 throtl_select_dispatch(&td->service_queue);
Joseph Qi4f02fb72017-09-30 14:38:49 +08001835 throtl_schedule_next_dispatch(&td->service_queue, true);
Shaohua Lic79892c2017-03-27 10:51:34 -07001836 queue_work(kthrotld_workqueue, &td->dispatch_work);
1837}
1838
Baolin Wang4247d9c82020-10-08 11:52:22 +08001839static void throtl_downgrade_state(struct throtl_data *td)
Shaohua Li3f0abd82017-03-27 10:51:35 -07001840{
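	/*
	 * Each downgrade attempt halves the upgrade scale; while the scale
	 * is still non-zero, only back-date low_upgrade_time by that many
	 * slices instead of dropping all the way back to LIMIT_LOW.
	 */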
Shaohua Li7394e312017-03-27 10:51:40 -07001841 td->scale /= 2;
1842
Shaohua Li4cff7292017-05-17 13:07:25 -07001843 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
Shaohua Li7394e312017-03-27 10:51:40 -07001844 if (td->scale) {
1845 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1846 return;
1847 }
1848
Baolin Wang4247d9c82020-10-08 11:52:22 +08001849 td->limit_index = LIMIT_LOW;
Shaohua Li3f0abd82017-03-27 10:51:35 -07001850 td->low_downgrade_time = jiffies;
1851}
1852
1853static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1854{
1855 struct throtl_data *td = tg->td;
1856 unsigned long now = jiffies;
1857
1858 /*
1859 * If cgroup is below low limit, consider downgrade and throttle other
1860 * cgroups
1861 */
Shaohua Li297e3d82017-03-27 10:51:37 -07001862 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1863 time_after_eq(now, tg_last_low_overflow_time(tg) +
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07001864 td->throtl_slice) &&
1865 (!throtl_tg_is_idle(tg) ||
1866 !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001867 return true;
1868 return false;
1869}
1870
1871static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1872{
1873 while (true) {
1874 if (!throtl_tg_can_downgrade(tg))
1875 return false;
1876 tg = sq_to_tg(tg->service_queue.parent_sq);
1877 if (!tg || !tg_to_blkg(tg)->parent)
1878 break;
1879 }
1880 return true;
1881}
1882
1883static void throtl_downgrade_check(struct throtl_grp *tg)
1884{
1885 uint64_t bps;
1886 unsigned int iops;
1887 unsigned long elapsed_time;
1888 unsigned long now = jiffies;
1889
1890 if (tg->td->limit_index != LIMIT_MAX ||
1891 !tg->td->limit_valid[LIMIT_LOW])
1892 return;
1893 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1894 return;
Shaohua Li297e3d82017-03-27 10:51:37 -07001895 if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001896 return;
1897
1898 elapsed_time = now - tg->last_check_time;
1899 tg->last_check_time = now;
1900
Shaohua Li297e3d82017-03-27 10:51:37 -07001901 if (time_before(now, tg_last_low_overflow_time(tg) +
1902 tg->td->throtl_slice))
Shaohua Li3f0abd82017-03-27 10:51:35 -07001903 return;
1904
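	/*
	 * Convert the bytes/ios dispatched over elapsed_time jiffies into
	 * per-second rates; a group that met its low limit has its
	 * last_low_overflow_time refreshed below.
	 */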
1905 if (tg->bps[READ][LIMIT_LOW]) {
1906 bps = tg->last_bytes_disp[READ] * HZ;
1907 do_div(bps, elapsed_time);
1908 if (bps >= tg->bps[READ][LIMIT_LOW])
1909 tg->last_low_overflow_time[READ] = now;
1910 }
1911
1912 if (tg->bps[WRITE][LIMIT_LOW]) {
1913 bps = tg->last_bytes_disp[WRITE] * HZ;
1914 do_div(bps, elapsed_time);
1915 if (bps >= tg->bps[WRITE][LIMIT_LOW])
1916 tg->last_low_overflow_time[WRITE] = now;
1917 }
1918
1919 if (tg->iops[READ][LIMIT_LOW]) {
Chunguang Xu4f1e9632021-08-02 11:51:56 +08001920 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
Shaohua Li3f0abd82017-03-27 10:51:35 -07001921 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1922 if (iops >= tg->iops[READ][LIMIT_LOW])
1923 tg->last_low_overflow_time[READ] = now;
1924 }
1925
1926 if (tg->iops[WRITE][LIMIT_LOW]) {
Chunguang Xu4f1e9632021-08-02 11:51:56 +08001927 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
Shaohua Li3f0abd82017-03-27 10:51:35 -07001928 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
1929 if (iops >= tg->iops[WRITE][LIMIT_LOW])
1930 tg->last_low_overflow_time[WRITE] = now;
1931 }
1932
1933 /*
1934 * If cgroup is below low limit, consider downgrade and throttle other
1935 * cgroups
1936 */
1937 if (throtl_hierarchy_can_downgrade(tg))
Baolin Wang4247d9c82020-10-08 11:52:22 +08001938 throtl_downgrade_state(tg->td);
Shaohua Li3f0abd82017-03-27 10:51:35 -07001939
1940 tg->last_bytes_disp[READ] = 0;
1941 tg->last_bytes_disp[WRITE] = 0;
1942 tg->last_io_disp[READ] = 0;
1943 tg->last_io_disp[WRITE] = 0;
1944}
1945
Shaohua Li9e234ee2017-03-27 10:51:41 -07001946static void blk_throtl_update_idletime(struct throtl_grp *tg)
1947{
Baolin Wang79016012020-10-08 11:52:23 +08001948 unsigned long now;
Shaohua Li9e234ee2017-03-27 10:51:41 -07001949 unsigned long last_finish_time = tg->last_finish_time;
1950
Baolin Wang79016012020-10-08 11:52:23 +08001951 if (last_finish_time == 0)
1952 return;
1953
1954 now = ktime_get_ns() >> 10;
1955 if (now <= last_finish_time ||
Shaohua Li9e234ee2017-03-27 10:51:41 -07001956 last_finish_time == tg->checked_last_finish_time)
1957 return;
1958
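	/*
	 * Exponentially weighted moving average with a 7/8 weight: each
	 * new idle sample (now - last_finish_time, roughly in usecs)
	 * contributes 1/8 to the running average.
	 */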
1959 tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
1960 tg->checked_last_finish_time = last_finish_time;
1961}
1962
Shaohua Lib9147dd2017-03-27 15:19:42 -07001963#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1964static void throtl_update_latency_buckets(struct throtl_data *td)
1965{
Joseph Qib889bf62017-11-21 09:38:30 +08001966 struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
1967 int i, cpu, rw;
1968 unsigned long last_latency[2] = { 0 };
1969 unsigned long latency[2];
Shaohua Lib9147dd2017-03-27 15:19:42 -07001970
Baolin Wangb185efa2020-10-08 11:52:24 +08001971 if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
Shaohua Lib9147dd2017-03-27 15:19:42 -07001972 return;
1973 if (time_before(jiffies, td->last_calculate_time + HZ))
1974 return;
1975 td->last_calculate_time = jiffies;
1976
1977 memset(avg_latency, 0, sizeof(avg_latency));
Joseph Qib889bf62017-11-21 09:38:30 +08001978 for (rw = READ; rw <= WRITE; rw++) {
1979 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
1980 struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
Shaohua Lib9147dd2017-03-27 15:19:42 -07001981
Joseph Qib889bf62017-11-21 09:38:30 +08001982 for_each_possible_cpu(cpu) {
1983 struct latency_bucket *bucket;
Shaohua Lib9147dd2017-03-27 15:19:42 -07001984
Joseph Qib889bf62017-11-21 09:38:30 +08001985 /* this isn't race free, but ok in practice */
1986 bucket = per_cpu_ptr(td->latency_buckets[rw],
1987 cpu);
1988 tmp->total_latency += bucket[i].total_latency;
1989 tmp->samples += bucket[i].samples;
1990 bucket[i].total_latency = 0;
1991 bucket[i].samples = 0;
1992 }
Shaohua Lib9147dd2017-03-27 15:19:42 -07001993
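			/*
			 * Only fold a bucket into the average once it has
			 * gathered a reasonable number of samples (>= 32).
			 */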
Joseph Qib889bf62017-11-21 09:38:30 +08001994 if (tmp->samples >= 32) {
1995 int samples = tmp->samples;
Shaohua Lib9147dd2017-03-27 15:19:42 -07001996
Joseph Qib889bf62017-11-21 09:38:30 +08001997 latency[rw] = tmp->total_latency;
Shaohua Lib9147dd2017-03-27 15:19:42 -07001998
Joseph Qib889bf62017-11-21 09:38:30 +08001999 tmp->total_latency = 0;
2000 tmp->samples = 0;
2001 latency[rw] /= samples;
2002 if (latency[rw] == 0)
2003 continue;
2004 avg_latency[rw][i].latency = latency[rw];
2005 }
Shaohua Lib9147dd2017-03-27 15:19:42 -07002006 }
2007 }
2008
Joseph Qib889bf62017-11-21 09:38:30 +08002009 for (rw = READ; rw <= WRITE; rw++) {
2010 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2011 if (!avg_latency[rw][i].latency) {
2012 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2013 td->avg_buckets[rw][i].latency =
2014 last_latency[rw];
2015 continue;
2016 }
2017
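		/*
		 * Seed an empty bucket with its first measured average;
		 * afterwards smooth with the same 7/8 EWMA so one noisy
		 * interval cannot swing the estimate.
		 */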
2018 if (!td->avg_buckets[rw][i].valid)
2019 latency[rw] = avg_latency[rw][i].latency;
2020 else
2021 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2022 avg_latency[rw][i].latency) >> 3;
2023
2024 td->avg_buckets[rw][i].latency = max(latency[rw],
2025 last_latency[rw]);
2026 td->avg_buckets[rw][i].valid = true;
2027 last_latency[rw] = td->avg_buckets[rw][i].latency;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002028 }
Shaohua Lib9147dd2017-03-27 15:19:42 -07002029 }
Shaohua Li4cff7292017-05-17 13:07:25 -07002030
2031 for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2032 throtl_log(&td->service_queue,
Joseph Qib889bf62017-11-21 09:38:30 +08002033 "Latency bucket %d: read latency=%ld, read valid=%d, "
2034 "write latency=%ld, write valid=%d", i,
2035 td->avg_buckets[READ][i].latency,
2036 td->avg_buckets[READ][i].valid,
2037 td->avg_buckets[WRITE][i].latency,
2038 td->avg_buckets[WRITE][i].valid);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002039}
2040#else
2041static inline void throtl_update_latency_buckets(struct throtl_data *td)
2042{
2043}
2044#endif
2045
Chunguang Xu4f1e9632021-08-02 11:51:56 +08002046void blk_throtl_charge_bio_split(struct bio *bio)
2047{
2048 struct blkcg_gq *blkg = bio->bi_blkg;
2049 struct throtl_grp *parent = blkg_to_tg(blkg);
2050 struct throtl_service_queue *parent_sq;
2051 bool rw = bio_data_dir(bio);
2052
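	/*
	 * Charge the split to this group and every ancestor that throttles
	 * this direction; has_rules[] is inherited, so the walk can stop at
	 * the first level without rules.
	 */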
2053 do {
2054 if (!parent->has_rules[rw])
2055 break;
2056
2057 atomic_inc(&parent->io_split_cnt[rw]);
2058 atomic_inc(&parent->last_io_split_cnt[rw]);
2059
2060 parent_sq = parent->service_queue.parent_sq;
2061 parent = sq_to_tg(parent_sq);
2062 } while (parent);
2063}
2064
Jens Axboea7b36ee2021-10-05 09:11:56 -06002065bool __blk_throtl_bio(struct bio *bio)
Vivek Goyale43473b2010-09-15 17:06:35 -04002066{
Pavel Begunkoved6cdde2021-10-14 15:03:30 +01002067 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
Christoph Hellwigdb18a532020-06-27 09:31:58 +02002068 struct blkcg_gq *blkg = bio->bi_blkg;
Tejun Heoc5cc2072013-05-14 13:52:38 -07002069 struct throtl_qnode *qn = NULL;
Christoph Hellwiga2e83ef2020-06-27 09:31:59 +02002070 struct throtl_grp *tg = blkg_to_tg(blkg);
Tejun Heo73f0d492013-05-14 13:52:35 -07002071 struct throtl_service_queue *sq;
Tejun Heo0e9f4162013-05-14 13:52:35 -07002072 bool rw = bio_data_dir(bio);
Tejun Heobc16a4f2011-10-19 14:33:01 +02002073 bool throttled = false;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002074 struct throtl_data *td = tg->td;
Vivek Goyale43473b2010-09-15 17:06:35 -04002075
Christoph Hellwig93b80632020-06-27 09:31:57 +02002076 rcu_read_lock();
Tejun Heoae118892015-08-18 14:55:20 -07002077
Tejun Heo7ca46432019-11-07 11:18:01 -08002078 if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
2079 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2080 bio->bi_iter.bi_size);
2081 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2082 }
2083
Christoph Hellwig0d945c12018-11-15 12:17:28 -07002084 spin_lock_irq(&q->queue_lock);
Tejun Heoc9589f02015-08-18 14:55:19 -07002085
Shaohua Lib9147dd2017-03-27 15:19:42 -07002086 throtl_update_latency_buckets(td);
2087
Shaohua Li9e234ee2017-03-27 10:51:41 -07002088 blk_throtl_update_idletime(tg);
2089
Tejun Heo73f0d492013-05-14 13:52:35 -07002090 sq = &tg->service_queue;
2091
Shaohua Lic79892c2017-03-27 10:51:34 -07002092again:
Tejun Heo9e660ac2013-05-14 13:52:38 -07002093 while (true) {
Shaohua Li3f0abd82017-03-27 10:51:35 -07002094 if (tg->last_low_overflow_time[rw] == 0)
2095 tg->last_low_overflow_time[rw] = jiffies;
2096 throtl_downgrade_check(tg);
Shaohua Lifa6fb5a2017-03-27 10:51:43 -07002097 throtl_upgrade_check(tg);
Tejun Heo9e660ac2013-05-14 13:52:38 -07002098 /* throtl is FIFO - if bios are already queued, this one must queue too */
2099 if (sq->nr_queued[rw])
2100 break;
Vivek Goyalde701c72011-03-07 21:09:32 +01002101
Tejun Heo9e660ac2013-05-14 13:52:38 -07002102 /* if above limits, break to queue */
Shaohua Lic79892c2017-03-27 10:51:34 -07002103 if (!tg_may_dispatch(tg, bio, NULL)) {
Shaohua Li3f0abd82017-03-27 10:51:35 -07002104 tg->last_low_overflow_time[rw] = jiffies;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002105 if (throtl_can_upgrade(td, tg)) {
2106 throtl_upgrade_state(td);
Shaohua Lic79892c2017-03-27 10:51:34 -07002107 goto again;
2108 }
Tejun Heo9e660ac2013-05-14 13:52:38 -07002109 break;
Shaohua Lic79892c2017-03-27 10:51:34 -07002110 }
Tejun Heo9e660ac2013-05-14 13:52:38 -07002111
2112 /* within limits, let's charge and dispatch directly */
Vivek Goyale43473b2010-09-15 17:06:35 -04002113 throtl_charge_bio(tg, bio);
Vivek Goyal04521db2011-03-22 21:54:29 +01002114
2115 /*
2116 * We need to trim the slice even when bios are not being queued,
2117 * otherwise it might happen that a bio is not queued for a long
2118 * time, the slice keeps on extending, and trim is not called for
2119 * a long time. Then, if limits are reduced suddenly, we would take
2120 * into account all the IO dispatched so far at the new low rate
2121 * and newly queued IO would get a really long dispatch
2122 * time.
2123 *
2124 * So keep on trimming slice even if bio is not queued.
2125 */
Tejun Heo0f3457f2013-05-14 13:52:32 -07002126 throtl_trim_slice(tg, rw);
Tejun Heo9e660ac2013-05-14 13:52:38 -07002127
2128 /*
2129 * @bio passed through this layer without being throttled.
Baolin Wangb53b0722020-09-07 16:10:13 +08002130 * Climb up the ladder. If we're already at the top, it
Tejun Heo9e660ac2013-05-14 13:52:38 -07002131 * can be executed directly.
2132 */
Tejun Heoc5cc2072013-05-14 13:52:38 -07002133 qn = &tg->qnode_on_parent[rw];
Tejun Heo9e660ac2013-05-14 13:52:38 -07002134 sq = sq->parent_sq;
2135 tg = sq_to_tg(sq);
2136 if (!tg)
2137 goto out_unlock;
Vivek Goyale43473b2010-09-15 17:06:35 -04002138 }
2139
Tejun Heo9e660ac2013-05-14 13:52:38 -07002140 /* out-of-limit, queue to @tg */
Tejun Heofda6f272013-05-14 13:52:36 -07002141 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2142 rw == READ ? 'R' : 'W',
Shaohua Li9f626e32017-03-27 10:51:30 -07002143 tg->bytes_disp[rw], bio->bi_iter.bi_size,
2144 tg_bps_limit(tg, rw),
2145 tg->io_disp[rw], tg_iops_limit(tg, rw),
Tejun Heofda6f272013-05-14 13:52:36 -07002146 sq->nr_queued[READ], sq->nr_queued[WRITE]);
Vivek Goyale43473b2010-09-15 17:06:35 -04002147
Shaohua Li3f0abd82017-03-27 10:51:35 -07002148 tg->last_low_overflow_time[rw] = jiffies;
2149
Shaohua Lib9147dd2017-03-27 15:19:42 -07002150 td->nr_queued[rw]++;
Tejun Heoc5cc2072013-05-14 13:52:38 -07002151 throtl_add_bio_tg(bio, qn, tg);
Tejun Heobc16a4f2011-10-19 14:33:01 +02002152 throttled = true;
Vivek Goyale43473b2010-09-15 17:06:35 -04002153
Tejun Heo7f52f982013-05-14 13:52:37 -07002154 /*
2155 * Update @tg's dispatch time and force schedule dispatch if @tg
2156 * was empty before @bio. The forced scheduling isn't likely to
2157 * cause undue delay as @bio is likely to be dispatched directly if
2158 * its @tg's disptime is not in the future.
2159 */
Tejun Heo0e9f4162013-05-14 13:52:35 -07002160 if (tg->flags & THROTL_TG_WAS_EMPTY) {
Tejun Heo77216b02013-05-14 13:52:36 -07002161 tg_update_disptime(tg);
Tejun Heo7f52f982013-05-14 13:52:37 -07002162 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
Vivek Goyale43473b2010-09-15 17:06:35 -04002163 }
2164
Tejun Heobc16a4f2011-10-19 14:33:01 +02002165out_unlock:
Christoph Hellwig0d945c12018-11-15 12:17:28 -07002166 spin_unlock_irq(&q->queue_lock);
Shaohua Li111be882017-12-20 11:10:17 -07002167 bio_set_flag(bio, BIO_THROTTLED);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002168
2169#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2170 if (throttled || !td->track_bio_latency)
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002171 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002172#endif
Christoph Hellwig93b80632020-06-27 09:31:57 +02002173 rcu_read_unlock();
Tejun Heobc16a4f2011-10-19 14:33:01 +02002174 return throttled;
Vivek Goyale43473b2010-09-15 17:06:35 -04002175}
2176
Shaohua Li9e234ee2017-03-27 10:51:41 -07002177#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
Shaohua Lib9147dd2017-03-27 15:19:42 -07002178static void throtl_track_latency(struct throtl_data *td, sector_t size,
2179 int op, unsigned long time)
2180{
2181 struct latency_bucket *latency;
2182 int index;
2183
Joseph Qib889bf62017-11-21 09:38:30 +08002184 if (!td || td->limit_index != LIMIT_LOW ||
2185 !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
Shaohua Lib9147dd2017-03-27 15:19:42 -07002186 !blk_queue_nonrot(td->queue))
2187 return;
2188
2189 index = request_bucket_index(size);
2190
Joseph Qib889bf62017-11-21 09:38:30 +08002191 latency = get_cpu_ptr(td->latency_buckets[op]);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002192 latency[index].total_latency += time;
2193 latency[index].samples++;
Joseph Qib889bf62017-11-21 09:38:30 +08002194 put_cpu_ptr(td->latency_buckets[op]);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002195}
2196
2197void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2198{
2199 struct request_queue *q = rq->q;
2200 struct throtl_data *td = q->td;
2201
Hou Tao3d244302019-05-21 15:59:03 +08002202 throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2203 time_ns >> 10);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002204}
2205
Shaohua Li9e234ee2017-03-27 10:51:41 -07002206void blk_throtl_bio_endio(struct bio *bio)
2207{
Josef Bacik08e18ea2018-07-03 11:14:50 -04002208 struct blkcg_gq *blkg;
Shaohua Li9e234ee2017-03-27 10:51:41 -07002209 struct throtl_grp *tg;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002210 u64 finish_time_ns;
2211 unsigned long finish_time;
2212 unsigned long start_time;
2213 unsigned long lat;
Joseph Qib889bf62017-11-21 09:38:30 +08002214 int rw = bio_data_dir(bio);
Shaohua Li9e234ee2017-03-27 10:51:41 -07002215
Josef Bacik08e18ea2018-07-03 11:14:50 -04002216 blkg = bio->bi_blkg;
2217 if (!blkg)
Shaohua Li9e234ee2017-03-27 10:51:41 -07002218 return;
Josef Bacik08e18ea2018-07-03 11:14:50 -04002219 tg = blkg_to_tg(blkg);
Baolin Wangb185efa2020-10-08 11:52:24 +08002220 if (!tg->td->limit_valid[LIMIT_LOW])
2221 return;
Shaohua Li9e234ee2017-03-27 10:51:41 -07002222
Shaohua Lib9147dd2017-03-27 15:19:42 -07002223 finish_time_ns = ktime_get_ns();
2224 tg->last_finish_time = finish_time_ns >> 10;
2225
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002226 start_time = bio_issue_time(&bio->bi_issue) >> 10;
2227 finish_time = __bio_issue_time(finish_time_ns) >> 10;
Josef Bacik08e18ea2018-07-03 11:14:50 -04002228 if (!start_time || finish_time <= start_time)
Shaohua Li53696b82017-03-27 15:19:43 -07002229 return;
2230
2231 lat = finish_time - start_time;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002232 /* this is only for bio based drivers */
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002233 if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2234 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2235 bio_op(bio), lat);
Shaohua Li53696b82017-03-27 15:19:43 -07002236
Shaohua Li6679a902017-06-06 12:40:43 -07002237 if (tg->latency_target && lat >= tg->td->filtered_latency) {
Shaohua Li53696b82017-03-27 15:19:43 -07002238 int bucket;
2239 unsigned int threshold;
2240
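		/*
		 * A bio counts as "bad" when its latency exceeds the
		 * measured baseline of its size bucket plus the cgroup's
		 * configured latency target.
		 */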
Omar Sandoval5238dcf2018-05-09 02:08:49 -07002241 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
Joseph Qib889bf62017-11-21 09:38:30 +08002242 threshold = tg->td->avg_buckets[rw][bucket].latency +
Shaohua Li53696b82017-03-27 15:19:43 -07002243 tg->latency_target;
2244 if (lat > threshold)
2245 tg->bad_bio_cnt++;
2246 /*
2247 * Not race free: the count can be slightly wrong, which means
2248 * cgroups may be throttled on inaccurate data
2249 */
2250 tg->bio_cnt++;
2251 }
2252
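	/*
	 * Age the counters so stale history decays: halve them once per
	 * throtl_slice, or sooner if more than 1024 bios accumulate.
	 */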
2253 if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2254 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2255 tg->bio_cnt /= 2;
2256 tg->bad_bio_cnt /= 2;
Shaohua Lib9147dd2017-03-27 15:19:42 -07002257 }
Shaohua Li9e234ee2017-03-27 10:51:41 -07002258}
2259#endif
2260
Vivek Goyale43473b2010-09-15 17:06:35 -04002261int blk_throtl_init(struct request_queue *q)
2262{
2263 struct throtl_data *td;
Tejun Heoa2b16932012-04-13 13:11:33 -07002264 int ret;
Vivek Goyale43473b2010-09-15 17:06:35 -04002265
2266 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2267 if (!td)
2268 return -ENOMEM;
Joseph Qib889bf62017-11-21 09:38:30 +08002269 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
Shaohua Lib9147dd2017-03-27 15:19:42 -07002270 LATENCY_BUCKET_SIZE, __alignof__(u64));
Joseph Qib889bf62017-11-21 09:38:30 +08002271 if (!td->latency_buckets[READ]) {
2272 kfree(td);
2273 return -ENOMEM;
2274 }
2275 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2276 LATENCY_BUCKET_SIZE, __alignof__(u64));
2277 if (!td->latency_buckets[WRITE]) {
2278 free_percpu(td->latency_buckets[READ]);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002279 kfree(td);
2280 return -ENOMEM;
2281 }
Vivek Goyale43473b2010-09-15 17:06:35 -04002282
Tejun Heo69df0ab2013-05-14 13:52:36 -07002283 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
Tejun Heob2ce2642015-08-18 14:55:13 -07002284 throtl_service_queue_init(&td->service_queue);
Vivek Goyale43473b2010-09-15 17:06:35 -04002285
Tejun Heocd1604f2012-03-05 13:15:06 -08002286 q->td = td;
Vivek Goyal29b12582011-05-19 15:38:24 -04002287 td->queue = q;
Vivek Goyal02977e42010-10-01 14:49:48 +02002288
Shaohua Li9f626e32017-03-27 10:51:30 -07002289 td->limit_valid[LIMIT_MAX] = true;
Shaohua Licd5ab1b2017-03-27 10:51:32 -07002290 td->limit_index = LIMIT_MAX;
Shaohua Li3f0abd82017-03-27 10:51:35 -07002291 td->low_upgrade_time = jiffies;
2292 td->low_downgrade_time = jiffies;
Shaohua Li9e234ee2017-03-27 10:51:41 -07002293
Tejun Heoa2b16932012-04-13 13:11:33 -07002294 /* activate policy */
Tejun Heo3c798392012-04-16 13:57:25 -07002295 ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002296 if (ret) {
Joseph Qib889bf62017-11-21 09:38:30 +08002297 free_percpu(td->latency_buckets[READ]);
2298 free_percpu(td->latency_buckets[WRITE]);
Vivek Goyal29b12582011-05-19 15:38:24 -04002299 kfree(td);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002300 }
Tejun Heoa2b16932012-04-13 13:11:33 -07002301 return ret;
Vivek Goyale43473b2010-09-15 17:06:35 -04002302}
2303
2304void blk_throtl_exit(struct request_queue *q)
2305{
Tejun Heoc875f4d2012-03-05 13:15:22 -08002306 BUG_ON(!q->td);
Li Jinlin884f0e82021-09-07 20:12:42 +08002307 del_timer_sync(&q->td->service_queue.pending_timer);
Vivek Goyalda527772011-03-02 19:05:33 -05002308 throtl_shutdown_wq(q);
Tejun Heo3c798392012-04-16 13:57:25 -07002309 blkcg_deactivate_policy(q, &blkcg_policy_throtl);
Joseph Qib889bf62017-11-21 09:38:30 +08002310 free_percpu(q->td->latency_buckets[READ]);
2311 free_percpu(q->td->latency_buckets[WRITE]);
Tejun Heoc9a929d2011-10-19 14:42:16 +02002312 kfree(q->td);
Vivek Goyale43473b2010-09-15 17:06:35 -04002313}
2314
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002315void blk_throtl_register_queue(struct request_queue *q)
2316{
2317 struct throtl_data *td;
Shaohua Li6679a902017-06-06 12:40:43 -07002318 int i;
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002319
2320 td = q->td;
2321 BUG_ON(!td);
2322
Shaohua Li6679a902017-06-06 12:40:43 -07002323 if (blk_queue_nonrot(q)) {
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002324 td->throtl_slice = DFL_THROTL_SLICE_SSD;
Shaohua Li6679a902017-06-06 12:40:43 -07002325 td->filtered_latency = LATENCY_FILTERED_SSD;
2326 } else {
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002327 td->throtl_slice = DFL_THROTL_SLICE_HD;
Shaohua Li6679a902017-06-06 12:40:43 -07002328 td->filtered_latency = LATENCY_FILTERED_HD;
Joseph Qib889bf62017-11-21 09:38:30 +08002329 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2330 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2331 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2332 }
Shaohua Li6679a902017-06-06 12:40:43 -07002333 }
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002334#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2335 /* if no low limit, use previous default */
2336 td->throtl_slice = DFL_THROTL_SLICE_HD;
2337#endif
Shaohua Li9e234ee2017-03-27 10:51:41 -07002338
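	/*
	 * blk-mq queues get latency samples from blk-stat via
	 * blk_throtl_stat_add(); bio based drivers have no requests, so
	 * latency is tracked at bio completion instead.
	 */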
Jens Axboe344e9ff2018-11-15 12:22:51 -07002339 td->track_bio_latency = !queue_is_mq(q);
Shaohua Lib9147dd2017-03-27 15:19:42 -07002340 if (!td->track_bio_latency)
2341 blk_stat_enable_accounting(q);
Shaohua Lid61fcfa2017-03-27 10:51:38 -07002342}
2343
Shaohua Li297e3d82017-03-27 10:51:37 -07002344#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2345ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2346{
2347 if (!q->td)
2348 return -EINVAL;
2349 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2350}
2351
2352ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2353 const char *page, size_t count)
2354{
2355 unsigned long v;
2356 unsigned long t;
2357
2358 if (!q->td)
2359 return -EINVAL;
2360 if (kstrtoul(page, 10, &v))
2361 return -EINVAL;
2362 t = msecs_to_jiffies(v);
2363 if (t == 0 || t > MAX_THROTL_SLICE)
2364 return -EINVAL;
2365 q->td->throtl_slice = t;
2366 return count;
2367}
2368#endif
2369
Vivek Goyale43473b2010-09-15 17:06:35 -04002370static int __init throtl_init(void)
2371{
Vivek Goyal450adcb2011-03-01 13:40:54 -05002372 kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2373 if (!kthrotld_workqueue)
2374 panic("Failed to create kthrotld\n");
2375
Tejun Heo3c798392012-04-16 13:57:25 -07002376 return blkcg_policy_register(&blkcg_policy_throtl);
Vivek Goyale43473b2010-09-15 17:06:35 -04002377}
2378
2379module_init(throtl_init);