#include "blk-rq-qos.h"

#include "blk-wbt.h"

/*
 * Increment 'v' if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, int below)
{
	int cur = atomic_read(v);

	for (;;) {
		int old;

		if (cur >= below)
			return false;
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			break;
		cur = old;
	}

	return true;
}

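/*
 * Increment the inflight count on 'rq_wait' if it is currently below
 * 'limit'. Returns true if the increment happened.
 */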
bool rq_wait_inc_below(struct rq_wait *rq_wait, int limit)
{
	return atomic_inc_below(&rq_wait->inflight, limit);
}

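/*
 * Walk the attached rq_qos policies and give each one with a ->cleanup
 * hook a chance to drop the state it tracked for 'wb_acct'.
 */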
void rq_qos_cleanup(struct request_queue *q, enum wbt_flags wb_acct)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, wb_acct);
	}
}

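/*
 * Tell each attached policy that implements ->done that 'rq' has completed.
 */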
void rq_qos_done(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->done)
			rqos->ops->done(rqos, rq);
	}
}

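/*
 * Tell each attached policy that implements ->issue that 'rq' is about to
 * be issued to the device.
 */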
void rq_qos_issue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->issue)
			rqos->ops->issue(rqos, rq);
	}
}

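/*
 * Tell each attached policy that implements ->requeue that 'rq' has been
 * requeued.
 */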
void rq_qos_requeue(struct request_queue *q, struct request *rq)
{
	struct rq_qos *rqos;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->requeue)
			rqos->ops->requeue(rqos, rq);
	}
}

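/*
 * Give each attached policy with a ->throttle hook a chance to throttle the
 * submitter of 'bio' before a request is allocated for it. The wbt flags
 * returned by the individual hooks are OR'ed together for the caller.
 */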
enum wbt_flags rq_qos_throttle(struct request_queue *q, struct bio *bio,
			       spinlock_t *lock)
{
	struct rq_qos *rqos;
	enum wbt_flags flags = 0;

	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
		if (rqos->ops->throttle)
			flags |= rqos->ops->throttle(rqos, bio, lock);
	}
	return flags;
}

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth;
	bool ret = false;

	/*
	 * For QD=1 devices, this is a special case. It's important for those
	 * to have one request ready when one completes, so force a depth of
	 * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
	 * since the device can't have more than that in flight. If we're
	 * scaling down, then keep a setting of 1/1/1.
	 */
	if (rqd->queue_depth == 1) {
		if (rqd->scale_step > 0)
			rqd->max_depth = 1;
		else {
			rqd->max_depth = 2;
			ret = true;
		}
	} else {
		/*
		 * scale_step == 0 is our default state. If we have suffered
		 * latency spikes, step will be > 0, and we shrink the
		 * allowed write depths. If step is < 0, we're only doing
		 * writes, and we allow a temporarily higher depth to
		 * increase performance.
		 */
		depth = min_t(unsigned int, rqd->default_depth,
			      rqd->queue_depth);
		if (rqd->scale_step > 0)
			depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
		else if (rqd->scale_step < 0) {
			unsigned int maxd = 3 * rqd->queue_depth / 4;

			depth = 1 + ((depth - 1) << -rqd->scale_step);
			if (depth > maxd) {
				depth = maxd;
				ret = true;
			}
		}

		rqd->max_depth = depth;
	}

	return ret;
}

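/*
 * Scale up the depth by one step, unless a previous scale up already hit
 * the maximum depth.
 */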
void rq_depth_scale_up(struct rq_depth *rqd)
{
	/*
	 * Hit max in previous round, stop here
	 */
	if (rqd->scaled_max)
		return;

	rqd->scale_step--;

	rqd->scaled_max = rq_depth_calc_max_depth(rqd);
}

/*
 * Scale the queue depth down. If 'hard_throttle' is set, do it quicker,
 * since we had a latency violation.
 */
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
	/*
	 * Stop scaling down when we've hit the limit. This also prevents
	 * ->scale_step from going to crazy values, if the device can't
	 * keep up.
	 */
	if (rqd->max_depth == 1)
		return;

	if (rqd->scale_step < 0 && hard_throttle)
		rqd->scale_step = 0;
	else
		rqd->scale_step++;

	rqd->scaled_max = false;
	rq_depth_calc_max_depth(rqd);
}

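/*
 * Tear down all attached rq_qos policies, unlinking each one from the
 * queue before calling its ->exit hook.
 */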
void rq_qos_exit(struct request_queue *q)
{
	while (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;
		q->rq_qos = rqos->next;
		rqos->ops->exit(rqos);
	}
}