/* SPDX-License-Identifier: GPL-2.0 */
#ifndef WB_THROTTLE_H
#define WB_THROTTLE_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ktime.h>

#include "blk-stat.h"

enum wbt_flags {
	WBT_TRACKED	= 1,	/* write, tracked for throttling */
	WBT_READ	= 2,	/* read */
	WBT_KSWAPD	= 4,	/* write, from kswapd */
	WBT_DISCARD	= 8,	/* discard */

	WBT_NR_BITS	= 4,	/* number of bits */
};
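/*
 * The values above are bit flags and may be OR'ed together; wbt_track()
 * below records the combined mask alongside the request.
 */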

enum {
	WBT_RWQ_BG	= 0,
	WBT_RWQ_KSWAPD,
	WBT_RWQ_DISCARD,
	WBT_NUM_RWQ,
};

/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,
	WBT_STATE_ON_MANUAL	= 2,
};

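/*
 * A wait queue paired with an inflight counter; the throttler keeps one
 * per write class (see rq_wait[WBT_NUM_RWQ] in struct rq_wb below).
 */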
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;
};

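/*
 * Per-request_queue writeback throttling state (see wbt_init() below).
 */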
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */
	unsigned int wb_max;			/* max throughput writeback */
	int scale_step;
	bool scaled_max;

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	struct blk_stat_callback *cb;

	s64 sync_issue;
	void *sync_cookie;

	unsigned int wc;
	unsigned int queue_depth;

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;
	struct request_queue *queue;
	struct rq_wait rq_wait[WBT_NUM_RWQ];
};

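/*
 * Sum of the inflight counters across all rq_wait classes.
 */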
static inline unsigned int wbt_inflight(struct rq_wb *rwb)
{
	unsigned int i, ret = 0;

	for (i = 0; i < WBT_NUM_RWQ; i++)
		ret += atomic_read(&rwb->rq_wait[i].inflight);

	return ret;
}

#ifdef CONFIG_BLK_WBT

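/*
 * Record the wbt flags for a request by packing them into the upper bits
 * of the issue stat (above BLK_STAT_RES_SHIFT) for later retrieval.
 */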
static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags flags)
{
	stat->stat |= ((u64)flags) << BLK_STAT_RES_SHIFT;
}

void __wbt_done(struct rq_wb *, enum wbt_flags);
void wbt_done(struct rq_wb *, struct blk_issue_stat *);
enum wbt_flags wbt_wait(struct rq_wb *, struct bio *, spinlock_t *);
int wbt_init(struct request_queue *);
void wbt_exit(struct request_queue *);
void wbt_update_limits(struct rq_wb *);
void wbt_requeue(struct rq_wb *, struct blk_issue_stat *);
void wbt_issue(struct rq_wb *, struct blk_issue_stat *);
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

void wbt_set_queue_depth(struct rq_wb *, unsigned int);
void wbt_set_write_cache(struct rq_wb *, bool);

u64 wbt_default_latency_nsec(struct request_queue *);

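/*
 * Rough request lifecycle from the throttler's point of view, sketched
 * from the declarations above only (variable and field names below are
 * illustrative, not the actual block layer call sites):
 *
 *	enum wbt_flags flags;
 *
 *	flags = wbt_wait(rwb, bio, lock);	// may sleep if throttled
 *	wbt_track(&rq->issue_stat, flags);	// remember how it was counted
 *	...
 *	wbt_issue(rwb, &rq->issue_stat);	// request sent to the device
 *	...
 *	wbt_done(rwb, &rq->issue_stat);		// completion; or wbt_requeue()
 *						// if the request is requeued
 */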
#else

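/*
 * CONFIG_BLK_WBT is not set: wbt_init() fails with -EINVAL and the rest
 * of the interface collapses to no-ops.
 */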
static inline void wbt_track(struct blk_issue_stat *stat, enum wbt_flags flags)
{
}
static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
{
}
static inline void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio,
				      spinlock_t *lock)
{
	return 0;
}
static inline int wbt_init(struct request_queue *q)
{
	return -EINVAL;
}
static inline void wbt_exit(struct request_queue *q)
{
}
static inline void wbt_update_limits(struct rq_wb *rwb)
{
}
static inline void wbt_requeue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_issue(struct rq_wb *rwb, struct blk_issue_stat *stat)
{
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_enable_default(struct request_queue *q)
{
}
static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
{
}
static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}

#endif /* CONFIG_BLK_WBT */

#endif