// SPDX-License-Identifier: GPL-2.0
/*
 * Buffered writeback throttling, loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

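/*
 * Helpers for the per-request wbt accounting flags stored in rq->wbt_flags.
 */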
static inline void wbt_clear_state(struct request *rq)
{
        rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
        return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
        return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
        return rq->wbt_flags & WBT_READ;
}

enum {
        /*
         * Default setting; we'll scale up (to 75% of QD max) or down (min 1)
         * from here depending on device stats.
         */
        RWB_DEF_DEPTH   = 16,

        /*
         * 100msec window
         */
        RWB_WINDOW_NSEC         = 100 * 1000 * 1000ULL,

        /*
         * Disregard stats if we don't meet this minimum
         */
        RWB_MIN_WRITE_SAMPLES   = 3,

        /*
         * If we have this number of consecutive windows without enough
         * information to scale up or down, scale up.
         */
        RWB_UNKNOWN_BUMP        = 5,
};

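/*
 * wbt counts as enabled as long as it hasn't been switched to
 * WBT_STATE_OFF_DEFAULT and a non-zero normal depth has been computed.
 */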
static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
                      rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                if (cur != *var)
                        *var = cur;
        }
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
        struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;

        return time_before(jiffies, wb->dirty_sleep + HZ);
}

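/*
 * Pick the rq_wait queue matching the accounting class of this IO:
 * kswapd writeback, discards, or regular background writeback.
 */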
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_KSWAPD)
                return &rwb->rq_wait[WBT_RWQ_KSWAPD];
        else if (wb_acct & WBT_DISCARD)
                return &rwb->rq_wait[WBT_RWQ_DISCARD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                struct rq_wait *rqw = &rwb->rq_wait[i];

                if (wq_has_sleeper(&rqw->wait))
                        wake_up_all(&rqw->wait);
        }
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
                         enum wbt_flags wb_acct)
{
        int inflight, limit;

        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * wbt got disabled with IO in flight. Wake up any potential
         * waiters; we don't have to do more than that.
         */
        if (unlikely(!rwb_enabled(rwb))) {
                rwb_wake_all(rwb);
                return;
        }

        /*
         * For discards, our limit is always the background limit. For
         * writes, if the device does write-back caching, drop further
         * down before we wake people up.
         */
        if (wb_acct & WBT_DISCARD)
                limit = rwb->wb_background;
        else if (rwb->wc && !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /*
         * Don't wake anyone up if we are above the normal limit.
         */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up_all(&rqw->wait);
        }
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct rq_wait *rqw;

        if (!(wb_acct & WBT_TRACKED))
                return;

        rqw = get_rq_wait(rwb, wb_acct);
        wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged and then freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!wbt_is_tracked(rq)) {
                if (rwb->sync_cookie == rq) {
                        rwb->sync_issue = 0;
                        rwb->sync_cookie = NULL;
                }

                if (wbt_is_read(rq))
                        wb_timestamp(rwb, &rwb->last_comp);
        } else {
                WARN_ON_ONCE(rq == rwb->sync_cookie);
                __wbt_done(rqos, wbt_flags(rq));
        }
        wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
        /*
         * We need at least one read sample, and at least
         * RWB_MIN_WRITE_SAMPLES write samples. We require some write
         * samples to know that it's writes impacting us, and not just a
         * lone read on a device that is in a lower power state.
         */
        return (stat[READ].nr_samples >= 1 &&
                stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

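/*
 * How long the currently tracked sync request has been outstanding, in
 * nanoseconds, or 0 if nothing is being tracked.
 */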
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 now, issue = READ_ONCE(rwb->sync_issue);

        if (!issue || !rwb->sync_cookie)
                return 0;

        now = ktime_to_ns(ktime_get());
        return now - issue;
}

enum {
        LAT_OK = 1,
        LAT_UNKNOWN,
        LAT_UNKNOWN_WRITES,
        LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;

        /*
         * If our stored sync issue exceeds the window size, or it
         * exceeds our min target AND we haven't logged any entries,
         * flag the latency as exceeded. wbt works off completion latencies,
         * but for a flooded device, a single sync IO can take a long time
         * to complete after being issued. If this time exceeds our
         * monitoring window AND we didn't see any other completions in that
         * window, then count that sync IO as a latency violation.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * No read/write mix if the stats aren't valid.
         */
        if (!stat_sample_valid(stat)) {
                /*
                 * If we had writes in this stat window and the window is
                 * current, we're only doing writes. If a task recently
                 * waited or still has writes in flight, consider us doing
                 * just writes as well.
                 */
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /*
         * If the 'min' latency exceeds our target, step down.
         */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rqd->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
        struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
        struct rq_depth *rqd = &rwb->rq_depth;

        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
                        rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

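/*
 * Derive the normal and background write depths from the current max
 * depth: roughly half of max for normal writeback, a quarter for
 * background writes, or 0 when the latency target is 0 (throttling off).
 */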
static void calc_wb_limits(struct rq_wb *rwb)
{
        if (rwb->min_lat_nsec == 0) {
                rwb->wb_normal = rwb->wb_background = 0;
        } else if (rwb->rq_depth.max_depth <= 2) {
                rwb->wb_normal = rwb->rq_depth.max_depth;
                rwb->wb_background = 1;
        } else {
                rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
        }
}

static void scale_up(struct rq_wb *rwb)
{
        if (!rq_depth_scale_up(&rwb->rq_depth))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
        rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, tracepoint_string("scale down"));
}

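/*
 * Re-arm the stats window. For positive scale steps the window shrinks to
 * win_nsec / sqrt(scale_step + 1), computed below with fixed-point
 * int_sqrt(); otherwise the default window size is used.
 */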
static void rwb_arm_timer(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        if (rqd->scale_step > 0) {
                /*
                 * We should speed this up, using some variant of a fast
                 * integer inverse square root calculation. Since we only do
                 * this for every window expiration, it's not a huge deal,
                 * though.
                 */
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                        int_sqrt((rqd->scale_step + 1) << 8));
        } else {
                /*
                 * For step < 0, we don't want to increase/decrease the
                 * window size.
                 */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        struct rq_depth *rqd = &rwb->rq_depth;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        if (!rwb->rqos.q->disk)
                return;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
                        inflight);

        /*
         * If we exceeded the latency target, step down. If we did not,
         * step one level up. If we don't know enough to say either exceeded
         * or ok, then don't do anything.
         */
        switch (status) {
        case LAT_EXCEEDED:
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /*
                 * We started at the center step, and don't have a valid
                 * read/write sample, but we do have writes going on.
                 * Allow the step to go negative, to increase write perf.
                 */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /*
                 * We get here when we previously scaled the depth, and we
                 * currently don't have a valid read/write sample. For that
                 * case, slowly return to center state (step == 0).
                 */
                if (rqd->scale_step > 0)
                        scale_up(rwb);
                else if (rqd->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /*
         * Re-arm timer, if we have IO in flight
         */
        if (rqd->scale_step || inflight)
                rwb_arm_timer(rwb);
}

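/*
 * Reset scaling to the center step and recompute the depth limits. Used
 * when the queue depth or the latency target changes.
 */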
static void wbt_update_limits(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        rqd->scale_step = 0;
        rqd->scaled_max = false;

        rq_depth_calc_max_depth(rqd);
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}

u64 wbt_get_min_lat(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return 0;
        return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return;
        RQWB(rqos)->min_lat_nsec = val;
        RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
        wbt_update_limits(RQWB(rqos));
}

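/*
 * True if unrelated IO was issued or completed within the last ~100ms
 * (HZ / 10).
 */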
static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        return time_before(now, rwb->last_issue + HZ / 10) ||
                time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO      (REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
        unsigned int limit;

        /*
         * If we got disabled, just return UINT_MAX. This ensures that
         * we'll properly inc a new IO, and dec+wakeup at the end.
         */
        if (!rwb_enabled(rwb))
                return UINT_MAX;

        if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
                return rwb->wb_background;

        /*
         * At this point we know it's a buffered write. If this is
         * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
         * IO for a bit.
         */
        if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
                limit = rwb->rq_depth.max_depth;
        else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
                /*
                 * If it's been less than 100ms since we completed unrelated
                 * IO, limit us to half the depth for background writeback.
                 */
                limit = rwb->wb_background;
        } else
                limit = rwb->wb_normal;

        return limit;
}

struct wbt_wait_data {
        struct rq_wb *rwb;
        enum wbt_flags wb_acct;
        unsigned long rw;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       unsigned long rw)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
                .rwb = rwb,
                .wb_acct = wb_acct,
                .rw = rw,
        };

        rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

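/*
 * Only writes and discards are throttled. Writes carrying both REQ_SYNC
 * and REQ_IDLE (O_DIRECT style) are left alone.
 */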
static inline bool wbt_should_throttle(struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_WRITE:
                /*
                 * Don't throttle WRITE_ODIRECT
                 */
                if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
                    (REQ_SYNC | REQ_IDLE))
                        return false;
                fallthrough;
        case REQ_OP_DISCARD:
                return true;
        default:
                return false;
        }
}

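/*
 * Classify a bio for wbt accounting: reads are marked WBT_READ for
 * timestamping only, while throttled writes and discards get WBT_TRACKED
 * (plus WBT_KSWAPD/WBT_DISCARD) so their completion is accounted.
 */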
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
        enum wbt_flags flags = 0;

        if (!rwb_enabled(rwb))
                return 0;

        if (bio_op(bio) == REQ_OP_READ) {
                flags = WBT_READ;
        } else if (wbt_should_throttle(bio)) {
                if (current_is_kswapd())
                        flags |= WBT_KSWAPD;
                if (bio_op(bio) == REQ_OP_DISCARD)
                        flags |= WBT_DISCARD;
                flags |= WBT_TRACKED;
        }
        return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
        __wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;

        flags = bio_to_wbt_flags(rwb, bio);
        if (!(flags & WBT_TRACKED)) {
                if (flags & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return;
        }

        __wbt_wait(rwb, flags, bio->bi_opf);

        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;

        /*
         * Track sync issue, in case it takes a long time to complete, so
         * we can react more quickly. Note that this is just a hint. The
         * request can go away when it completes, so it's important we never
         * dereference it. We only use the address to compare with, which is
         * why we store the sync_issue time locally.
         */
        if (wbt_is_read(rq) && !rwb->sync_issue) {
                rwb->sync_cookie = rq;
                rwb->sync_issue = rq->io_start_time_ns;
        }
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);
        if (!rwb_enabled(rwb))
                return;
        if (rq == rwb->sync_cookie) {
                rwb->sync_issue = 0;
                rwb->sync_cookie = NULL;
        }
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (rqos)
                RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        /* Throttling already enabled? */
        if (rqos) {
                if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
                        RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
                return;
        }

        /* Queue not registered? Maybe shutting down... */
        if (!blk_queue_registered(q))
                return;

        if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
                wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
        /*
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
        if (blk_queue_nonrot(q))
                return 2000000ULL;
        else
                return 75000000ULL;
}

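/*
 * Bucket requests for the stats callback: READ or WRITE; anything else
 * is not accounted.
 */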
static int wbt_data_dir(const struct request *rq)
{
        const int op = req_op(rq);

        if (op == REQ_OP_READ)
                return READ;
        else if (op_is_write(op))
                return WRITE;

        /* don't account */
        return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
        RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
        wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct request_queue *q = rqos->q;

        blk_stat_remove_callback(q, rwb->cb);
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        struct rq_wb *rwb;
        if (!rqos)
                return;
        rwb = RQWB(rqos);
        if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
                blk_stat_deactivate(rwb->cb);
                rwb->enable_state = WBT_STATE_OFF_DEFAULT;
        }
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->cur_win_nsec);
        return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%d\n", rwb->enable_state);
        return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;

        seq_printf(m, "%u\n", rqos->id);
        return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                seq_printf(m, "%d: inflight %d\n", i,
                           atomic_read(&rwb->rq_wait[i].inflight));
        return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%lu\n", rwb->min_lat_nsec);
        return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->unknown_cnt);
        return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_normal);
        return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_background);
        return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
        {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
        {"enabled", 0400, wbt_enabled_show},
        {"id", 0400, wbt_id_show},
        {"inflight", 0400, wbt_inflight_show},
        {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
        {"unknown_cnt", 0400, wbt_unknown_cnt_show},
        {"wb_normal", 0400, wbt_normal_show},
        {"wb_background", 0400, wbt_background_show},
        {},
};
#endif

static struct rq_qos_ops wbt_rqos_ops = {
        .throttle = wbt_wait,
        .issue = wbt_issue,
        .track = wbt_track,
        .requeue = wbt_requeue,
        .done = wbt_done,
        .cleanup = wbt_cleanup,
        .queue_depth_changed = wbt_queue_depth_changed,
        .exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
        .debugfs_attrs = wbt_debugfs_attrs,
#endif
};

int wbt_init(struct request_queue *q)
{
        struct rq_wb *rwb;
        int i;

        rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
        if (!rwb)
                return -ENOMEM;

        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return -ENOMEM;
        }

        for (i = 0; i < WBT_NUM_RWQ; i++)
                rq_wait_init(&rwb->rq_wait[i]);

        rwb->rqos.id = RQ_QOS_WBT;
        rwb->rqos.ops = &wbt_rqos_ops;
        rwb->rqos.q = q;
        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        rwb->wc = 1;
        rwb->rq_depth.default_depth = RWB_DEF_DEPTH;

        /*
         * Assign rwb and add the stats callback.
         */
        rq_qos_add(q, &rwb->rqos);
        blk_stat_add_callback(q, rwb->cb);

        rwb->min_lat_nsec = wbt_default_latency_nsec(q);

        wbt_queue_depth_changed(&rwb->rqos);
        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

        return 0;
}