/* SPDX-License-Identifier: GPL-2.0
 *
 * IO cost model based controller.
 *
 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
 * Copyright (C) 2019 Andy Newell <newella@fb.com>
 * Copyright (C) 2019 Facebook
 *
 * One challenge of controlling IO resources is the lack of a trivially
 * observable cost metric. This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * approximations.
 *
 * Bandwidth and iops are the most commonly used metrics for IO devices but
 * depending on the type and specifics of the device, different IO patterns
 * easily lead to multiple orders of magnitude variations rendering them
 * useless for the purpose of IO capacity distribution. While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
 * non-queued rotational devices, this is no longer viable with modern
 * devices, even the rotational ones.
 *
 * While there is no cost metric we can trivially observe, it isn't a
 * complete mystery. For example, on a rotational device, seek cost
 * dominates while a contiguous transfer contributes a smaller amount
 * proportional to the size. If we can characterize at least the relative
 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
 *
 * 1. IO Cost Model
 *
 * The IO cost model estimates the cost of an IO given its basic parameters
 * and history (e.g. the end sector of the last IO). The cost is measured
 * in device time. If a given IO is estimated to cost 10ms, the device
 * should be able to process ~100 of those IOs in a second.
 *
 * Currently, there's only one builtin cost model - linear. Each IO is
 * classified as sequential or random and given a base cost accordingly.
 * On top of that, a size cost proportional to the length of the IO is
 * added. While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough. Default
 * parameters for several different classes of devices are provided and the
 * parameters can be configured from userspace via
 * /sys/fs/cgroup/io.cost.model.
 *
 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
 * device-specific coefficients.
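 *
 * As an illustration (values are the HDD defaults from the autop table
 * below and "8:16" stands in for the target device's MAJ:MIN), the linear
 * model could be configured by hand with something like:
 *
 *	echo "8:16 model=linear rbps=174019176 rseqiops=41708 rrandiops=370 \
 *	      wbps=178075866 wseqiops=42705 wrandiops=378" \
 *	      > /sys/fs/cgroup/io.cost.model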
 *
 * 2. Control Strategy
 *
 * The device virtual time (vtime) is used as the primary control metric.
 * The control strategy is composed of the following three parts.
 *
 * 2-1. Vtime Distribution
 *
 * When a cgroup becomes active in terms of IOs, its hierarchical share is
 * calculated. Please consider the following hierarchy where the numbers
 * inside parentheses denote the configured weights.
 *
 *                root
 *              /       \
 *           A (w:100)  B (w:300)
 *          /       \
 *  A0 (w:100)  A1 (w:100)
 *
 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
 * 12.5% each. The distribution mechanism only cares about these flattened
 * shares. They're called hweights (hierarchical weights) and always add
 * up to 1 (WEIGHT_ONE).
 *
 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
 * against the device vtime - an IO which takes 10ms on the underlying
 * device is considered to take 80ms on A0.
 *
 * This constitutes the basis of IO capacity distribution. Each cgroup's
 * vtime is running at a rate determined by its hweight. A cgroup tracks
 * the vtime consumed by past IOs and can issue a new IO if doing so
 * wouldn't outrun the current device vtime. Otherwise, the IO is
 * suspended until the vtime has progressed enough to cover it.
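 *
 * In rough pseudo-C (a sketch of the issue-side check, not the exact code
 * further down in this file):
 *
 *	vtime = atomic64_read(&iocg->vtime);
 *	cost = abs_cost_to_cost(abs_cost, hw_inuse);
 *	if (time_before_eq64(vtime + cost, vnow))
 *		issue and charge: atomic64_add(cost, &iocg->vtime);
 *	else
 *		wait on iocg->waitq until vnow catches up;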
 *
 * 2-2. Vrate Adjustment
 *
 * It's unrealistic to expect the cost model to be perfect. There are too
 * many devices and even on the same device the overall performance
 * fluctuates depending on numerous factors such as IO mixture and device
 * internal garbage collection. The controller needs to adapt dynamically.
 *
 * This is achieved by adjusting the overall IO rate according to how busy
 * the device is. If the device becomes overloaded, we're sending down too
 * many IOs and should generally slow down. If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should
 * generally speed up.
 *
 * To slow down, we lower the vrate - the rate at which the device vtime
 * passes compared to the wall clock. For example, if the vtime is running
 * at a vrate of 75%, all cgroups added up would only be able to issue
 * 750ms worth of IOs per second, and vice-versa for speeding up.
 *
 * Device busyness is determined using two criteria - rq wait and
 * completion latencies.
 *
 * When a device gets saturated, the on-device and then the request queues
 * fill up and a bio which is ready to be issued has to wait for a request
 * to become available. When this delay becomes noticeable, it's a clear
 * indication that the device is saturated and we lower the vrate. This
 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
 *
 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
 * control quality. For better control quality, completion latency QoS
 * parameters can be configured so that the device is considered saturated
 * if the N'th percentile completion latency rises above the set point.
 *
 * The completion latency requirements are a function of both the
 * underlying device characteristics and the desired IO latency quality of
 * service. There is an inherent trade-off - the tighter the latency QoS,
 * the higher the bandwidth loss. Latency QoS is disabled by default
 * and can be set through /sys/fs/cgroup/io.cost.qos.
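 *
 * For example, the following (illustrative values, "8:16" again standing
 * in for the target device) would consider the device saturated when the
 * 95th percentile read completion latency exceeds 10ms or the write
 * completion latency exceeds 50ms:
 *
 *	echo "8:16 enable=1 rpct=95.00 rlat=10000 wpct=95.00 wlat=50000" \
 *	     > /sys/fs/cgroup/io.cost.qos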
 *
 * 2-3. Work Conservation
 *
 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
 * periodically while B is sending out enough parallel IOs to saturate the
 * device on its own. Let's say A's usage amounts to 100ms worth of IO
 * cost per second, i.e., 10% of the device capacity. The naive
 * distribution of half and half would lead to 60% utilization of the
 * device, a significant reduction in the total amount of work done
 * compared to free-for-all competition. This is too high a cost to pay
 * for IO control.
 *
 * To conserve the total amount of work done, we keep track of how much
 * each active cgroup is actually using and yield part of its weight if
 * there are other cgroups which can make use of it. In the above case,
 * A's weight will be lowered so that it hovers above the actual usage and
 * B would be able to use the rest.
 *
 * As we don't want to penalize a cgroup for donating its weight, the
 * surplus weight adjustment factors in a margin and has an immediate
 * snapback mechanism in case the cgroup needs more IO vtime for itself.
 *
 * Note that adjusting down surplus weights has the same effects as
 * accelerating vtime for other cgroups and work conservation can also be
 * implemented by adjusting vrate dynamically. However, working out who
 * can donate and how much should be taken back requires hweight
 * propagations anyway, making it easier to implement and understand as a
 * separate mechanism.
 *
 * 3. Monitoring
 *
 * Instead of debugfs or other clumsy monitoring mechanisms, this
 * controller uses a drgn based monitoring script -
 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
 * https://github.com/osandov/drgn. The output looks like the following.
 *
 *  sdb RUN   per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
 *                 active      weight      hweight% inflt% dbt  delay usages%
 *  test/a              *    50/   50  33.33/ 33.33  27.65   2  0*041 033:033:033
 *  test/b              *   100/  100  66.67/ 66.67  17.56   0  0*000 066:079:077
 *
 * - per	: Timer period
 * - cur_per	: Internal wall and device vtime clock
 * - vrate	: Device virtual time rate against wall clock
 * - weight	: Surplus-adjusted and configured weights
 * - hweight	: Surplus-adjusted and configured hierarchical weights
 * - inflt	: The percentage of in-flight IO cost at the end of last period
 * - dbt	: Outstanding debt
 * - delay	: Deferred issuer delay induction level and duration
 * - usages	: Usage history
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/time64.h>
#include <linux/parser.h>
#include <linux/sched/signal.h>
#include <linux/blk-cgroup.h>
#include <asm/local.h>
#include <asm/local64.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-wbt.h"

#ifdef CONFIG_TRACEPOINTS

/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
#define TRACE_IOCG_PATH_LEN 1024
static DEFINE_SPINLOCK(trace_iocg_path_lock);
static char trace_iocg_path[TRACE_IOCG_PATH_LEN];

#define TRACE_IOCG_PATH(type, iocg, ...)					\
	do {									\
		unsigned long flags;						\
		if (trace_iocost_##type##_enabled()) {				\
			spin_lock_irqsave(&trace_iocg_path_lock, flags);	\
			cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup,	\
				    trace_iocg_path, TRACE_IOCG_PATH_LEN);	\
			trace_iocost_##type(iocg, trace_iocg_path,		\
					    ##__VA_ARGS__);			\
			spin_unlock_irqrestore(&trace_iocg_path_lock, flags);	\
		}								\
	} while (0)

#else	/* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...)	do { } while (0)
#endif	/* CONFIG_TRACEPOINTS */

enum {
	MILLION			= 1000000,

	/* timer period is calculated from latency requirements, bound it */
	MIN_PERIOD		= USEC_PER_MSEC,
	MAX_PERIOD		= USEC_PER_SEC,

	/*
	 * iocg->vtime is targeted at 50% behind the device vtime, which
	 * serves as its IO credit buffer. Surplus weight adjustment is
	 * immediately canceled if the vtime margin runs below 10%.
	 */
	MARGIN_MIN_PCT		= 10,
	MARGIN_LOW_PCT		= 20,
	MARGIN_TARGET_PCT	= 50,

	INUSE_ADJ_STEP_PCT	= 25,

	/* Have some play in timer operations */
	TIMER_SLACK_PCT		= 1,

	/* 1/64k is granular enough and can easily be handled w/ u32 */
	WEIGHT_ONE		= 1 << 16,

	/*
	 * As vtime is used to calculate the cost of each IO, it needs to
	 * be fairly high precision. For example, it should be able to
	 * represent the cost of a single page worth of discard with
	 * sufficient accuracy. At the same time, it should be able to
	 * represent reasonably long enough durations to be useful and
	 * convenient during operation.
	 *
	 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
	 * granularity and days of wrap-around time even at extreme vrates.
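	 *
	 * (Concretely: VTIME_PER_USEC = 2^37 / 10^6 ~= 137438, so even
	 * sub-usec costs are representable, while a u64 vtime wraps only
	 * after 2^64 / 2^37 seconds - about 4.3 years at 100% vrate and
	 * still about 15 days at the 10000% maximum.)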
	 */
	VTIME_PER_SEC_SHIFT	= 37,
	VTIME_PER_SEC		= 1LLU << VTIME_PER_SEC_SHIFT,
	VTIME_PER_USEC		= VTIME_PER_SEC / USEC_PER_SEC,
	VTIME_PER_NSEC		= VTIME_PER_SEC / NSEC_PER_SEC,

	/* bound vrate adjustments within two orders of magnitude */
	VRATE_MIN_PPM		= 10000,	/* 1% */
	VRATE_MAX_PPM		= 100000000,	/* 10000% */

	VRATE_MIN		= VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
	VRATE_CLAMP_ADJ_PCT	= 4,

	/* if IOs end up waiting for requests, issue less */
	RQ_WAIT_BUSY_PCT	= 5,

	/* unbusy hysteresis */
	UNBUSY_THR_PCT		= 75,

	/*
	 * The effect of delay is indirect and non-linear and a huge amount of
	 * future debt can accumulate abruptly while unthrottled. Linearly scale
	 * up delay as debt is going up and then let it decay exponentially.
	 * This gives us quick ramp ups while delay is accumulating and long
	 * tails which can help reducing the frequency of debt explosions on
	 * unthrottle. The parameters are experimentally determined.
	 *
	 * The delay mechanism provides adequate protection and behavior in many
	 * cases. However, this is far from ideal and falls short on both
	 * fronts. The debtors are often throttled too harshly, costing a
	 * significant level of fairness and possibly total work, while the
	 * protection against their impact on the system can be choppy and
	 * unreliable.
	 *
	 * The shortcoming primarily stems from the fact that, unlike for page
	 * cache, the kernel doesn't have a well-defined back-pressure propagation
	 * mechanism and policies for anonymous memory. Fully addressing this
	 * issue will likely require substantial improvements in the area.
	 */
	MIN_DELAY_THR_PCT	= 500,
	MAX_DELAY_THR_PCT	= 25000,
	MIN_DELAY		= 250,
	MAX_DELAY		= 250 * USEC_PER_MSEC,

	/* halve debts if avg usage over 100ms is under 50% */
	DFGV_USAGE_PCT		= 50,
	DFGV_PERIOD		= 100 * USEC_PER_MSEC,

	/* don't let cmds which take a very long time pin lagging for too long */
	MAX_LAGGING_PERIODS	= 10,

	/* switch iff the conditions are met for longer than this */
	AUTOP_CYCLE_NSEC	= 10LLU * NSEC_PER_SEC,

	/*
	 * Count IO size in 4k pages. The 12bit shift helps keeping
	 * size-proportional components of cost calculation in closer
	 * numbers of digits to per-IO cost components.
	 */
	IOC_PAGE_SHIFT		= 12,
	IOC_PAGE_SIZE		= 1 << IOC_PAGE_SHIFT,
	IOC_SECT_TO_PAGE_SHIFT	= IOC_PAGE_SHIFT - SECTOR_SHIFT,

	/* if apart further than 16M, consider randio for linear model */
	LCOEF_RANDIO_PAGES	= 4096,
};

enum ioc_running {
	IOC_IDLE,
	IOC_RUNNING,
	IOC_STOP,
};

/* io.cost.qos controls including per-dev enable of the whole controller */
enum {
	QOS_ENABLE,
	QOS_CTRL,
	NR_QOS_CTRL_PARAMS,
};

/* io.cost.qos params */
enum {
	QOS_RPPM,
	QOS_RLAT,
	QOS_WPPM,
	QOS_WLAT,
	QOS_MIN,
	QOS_MAX,
	NR_QOS_PARAMS,
};

/* io.cost.model controls */
enum {
	COST_CTRL,
	COST_MODEL,
	NR_COST_CTRL_PARAMS,
};

/* builtin linear cost model coefficients */
enum {
	I_LCOEF_RBPS,
	I_LCOEF_RSEQIOPS,
	I_LCOEF_RRANDIOPS,
	I_LCOEF_WBPS,
	I_LCOEF_WSEQIOPS,
	I_LCOEF_WRANDIOPS,
	NR_I_LCOEFS,
};

enum {
	LCOEF_RPAGE,
	LCOEF_RSEQIO,
	LCOEF_RRANDIO,
	LCOEF_WPAGE,
	LCOEF_WSEQIO,
	LCOEF_WRANDIO,
	NR_LCOEFS,
};

enum {
	AUTOP_INVALID,
	AUTOP_HDD,
	AUTOP_SSD_QD1,
	AUTOP_SSD_DFL,
	AUTOP_SSD_FAST,
};

struct ioc_params {
	u32 qos[NR_QOS_PARAMS];
	u64 i_lcoefs[NR_I_LCOEFS];
	u64 lcoefs[NR_LCOEFS];
	u32 too_fast_vrate_pct;
	u32 too_slow_vrate_pct;
};

struct ioc_margins {
	s64 min;
	s64 low;
	s64 target;
};

struct ioc_missed {
	local_t nr_met;
	local_t nr_missed;
	u32 last_met;
	u32 last_missed;
};

struct ioc_pcpu_stat {
	struct ioc_missed missed[2];

	local64_t rq_wait_ns;
	u64 last_rq_wait_ns;
};

/* per device */
struct ioc {
	struct rq_qos rqos;

	bool enabled;

	struct ioc_params params;
	struct ioc_margins margins;
	u32 period_us;
	u32 timer_slack_ns;
	u64 vrate_min;
	u64 vrate_max;

	spinlock_t lock;
	struct timer_list timer;
	struct list_head active_iocgs;	/* active cgroups */
	struct ioc_pcpu_stat __percpu *pcpu_stat;

	enum ioc_running running;
	atomic64_t vtime_rate;
	u64 vtime_base_rate;
	s64 vtime_err;

	seqcount_spinlock_t period_seqcount;
	u64 period_at;			/* wallclock starttime */
	u64 period_at_vtime;		/* vtime starttime */

	atomic64_t cur_period;		/* inc'd each period */
	int busy_level;			/* saturation history */

	bool weights_updated;
	atomic_t hweight_gen;		/* for lazy hweights */

	/* debt forgiveness */
	u64 dfgv_period_at;
	u64 dfgv_period_rem;
	u64 dfgv_usage_us_sum;

	u64 autop_too_fast_at;
	u64 autop_too_slow_at;
	int autop_idx;
	bool user_qos_params:1;
	bool user_cost_model:1;
};

struct iocg_pcpu_stat {
	local64_t abs_vusage;
};

struct iocg_stat {
	u64 usage_us;
	u64 wait_us;
	u64 indebt_us;
	u64 indelay_us;
};

/* per device-cgroup pair */
struct ioc_gq {
	struct blkg_policy_data pd;
	struct ioc *ioc;

	/*
	 * An iocg can get its weight from two sources - an explicit
	 * per-device-cgroup configuration or the default weight of the
	 * cgroup. `cfg_weight` is the explicit per-device-cgroup
	 * configuration. `weight` is the effective weight considering
	 * both sources.
	 *
	 * When an idle cgroup becomes active its `active` goes from 0 to
	 * `weight`. `inuse` is the surplus-adjusted active weight.
	 * `active` and `inuse` are used to calculate `hweight_active` and
	 * `hweight_inuse`.
	 *
	 * `last_inuse` remembers `inuse` while an iocg is idle to persist
	 * surplus adjustments.
	 *
	 * `inuse` may be adjusted dynamically during the period. `saved_*`
	 * are used to determine and track adjustments.
	 */
	u32 cfg_weight;
	u32 weight;
	u32 active;
	u32 inuse;

	u32 last_inuse;
	s64 saved_margin;

	sector_t cursor;		/* to detect randio */

	/*
	 * `vtime` is this iocg's vtime cursor which progresses as IOs are
	 * issued. If lagging behind device vtime, the delta represents
	 * the currently available IO budget. If running ahead, the
	 * overage.
	 *
	 * `done_vtime` is the same but progressed on completion rather
	 * than issue. The delta behind `vtime` represents the cost of
	 * currently in-flight IOs.
	 */
	atomic64_t vtime;
	atomic64_t done_vtime;
	u64 abs_vdebt;

	/* current delay in effect and when it started */
	u64 delay;
	u64 delay_at;

	/*
	 * The period this iocg was last active in. Used for deactivation
	 * and invalidating `vtime`.
	 */
	atomic64_t active_period;
	struct list_head active_list;

	/* see __propagate_weights() and current_hweight() for details */
	u64 child_active_sum;
	u64 child_inuse_sum;
	u64 child_adjusted_sum;
	int hweight_gen;
	u32 hweight_active;
	u32 hweight_inuse;
	u32 hweight_donating;
	u32 hweight_after_donation;

	struct list_head walk_list;
	struct list_head surplus_list;

	struct wait_queue_head waitq;
	struct hrtimer waitq_timer;

	/* timestamp at the latest activation */
	u64 activated_at;

	/* statistics */
	struct iocg_pcpu_stat __percpu *pcpu_stat;
	struct iocg_stat local_stat;
	struct iocg_stat desc_stat;
	struct iocg_stat last_stat;
	u64 last_stat_abs_vusage;
	u64 usage_delta_us;
	u64 wait_since;
	u64 indebt_since;
	u64 indelay_since;

	/* this iocg's depth in the hierarchy and ancestors including self */
	int level;
	struct ioc_gq *ancestors[];
};

/* per cgroup */
struct ioc_cgrp {
	struct blkcg_policy_data cpd;
	unsigned int dfl_weight;
};

struct ioc_now {
	u64 now_ns;
	u64 now;
	u64 vnow;
	u64 vrate;
};

struct iocg_wait {
	struct wait_queue_entry wait;
	struct bio *bio;
	u64 abs_cost;
	bool committed;
};

struct iocg_wake_ctx {
	struct ioc_gq *iocg;
	u32 hw_inuse;
	s64 vbudget;
};

static const struct ioc_params autop[] = {
	[AUTOP_HDD] = {
		.qos				= {
			[QOS_RLAT]		=        250000, /* 250ms */
			[QOS_WLAT]		=        250000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		=     174019176,
			[I_LCOEF_RSEQIOPS]	=         41708,
			[I_LCOEF_RRANDIOPS]	=           370,
			[I_LCOEF_WBPS]		=     178075866,
			[I_LCOEF_WSEQIOPS]	=         42705,
			[I_LCOEF_WRANDIOPS]	=           378,
		},
	},
	[AUTOP_SSD_QD1] = {
		.qos				= {
			[QOS_RLAT]		=         25000, /* 25ms */
			[QOS_WLAT]		=         25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		=     245855193,
			[I_LCOEF_RSEQIOPS]	=         61575,
			[I_LCOEF_RRANDIOPS]	=          6946,
			[I_LCOEF_WBPS]		=     141365009,
			[I_LCOEF_WSEQIOPS]	=         33716,
			[I_LCOEF_WRANDIOPS]	=         26796,
		},
	},
	[AUTOP_SSD_DFL] = {
		.qos				= {
			[QOS_RLAT]		=         25000, /* 25ms */
			[QOS_WLAT]		=         25000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		=     488636629,
			[I_LCOEF_RSEQIOPS]	=          8932,
			[I_LCOEF_RRANDIOPS]	=          8518,
			[I_LCOEF_WBPS]		=     427891549,
			[I_LCOEF_WSEQIOPS]	=         28755,
			[I_LCOEF_WRANDIOPS]	=         21940,
		},
		.too_fast_vrate_pct		=           500,
	},
	[AUTOP_SSD_FAST] = {
		.qos				= {
			[QOS_RLAT]		=          5000, /* 5ms */
			[QOS_WLAT]		=          5000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
		.i_lcoefs			= {
			[I_LCOEF_RBPS]		= 3102524156LLU,
			[I_LCOEF_RSEQIOPS]	=        724816,
			[I_LCOEF_RRANDIOPS]	=        778122,
			[I_LCOEF_WBPS]		= 1742780862LLU,
			[I_LCOEF_WSEQIOPS]	=        425702,
			[I_LCOEF_WRANDIOPS]	=        443193,
		},
		.too_slow_vrate_pct		=            10,
	},
};

/*
 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
 * vtime credit shortage and down on device saturation.
 */
static u32 vrate_adj_pct[] =
	{ 0, 0, 0, 0,
	  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	  4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
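
/*
 * For example, a busy_level of 20 (saturated for 20 consecutive periods)
 * indexes 2 above and, in ioc_adjust_base_vrate(), scales vrate to 98%;
 * a busy_level of -20 (vtime credit shortage) scales it to 102%.
 */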

static struct blkcg_policy blkcg_policy_iocost;

/* accessors and helpers */
static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
{
	return container_of(rqos, struct ioc, rqos);
}

static struct ioc *q_to_ioc(struct request_queue *q)
{
	return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
}

static const char *q_name(struct request_queue *q)
{
	if (blk_queue_registered(q))
		return kobject_name(q->kobj.parent);
	else
		return "<unknown>";
}

static const char __maybe_unused *ioc_name(struct ioc *ioc)
{
	return q_name(ioc->rqos.q);
}

static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
}

static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
{
	return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
}

static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
{
	return pd_to_blkg(&iocg->pd);
}

static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
{
	return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
			    struct ioc_cgrp, cpd);
}

/*
 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
 * weight, the more expensive each IO. Must round up.
 */
static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
}

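/*
 * For example, with hw_inuse at 50% (WEIGHT_ONE / 2), an IO whose absolute
 * cost is 10ms worth of vtime is charged 20ms against the cgroup's local
 * vtime - half the hierarchical share makes each IO twice as expensive.
 */
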
/*
 * The inverse of abs_cost_to_cost(). Must round up.
 */
static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
{
	return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
}

static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
			    u64 abs_cost, u64 cost)
{
	struct iocg_pcpu_stat *gcs;

	bio->bi_iocost_cost = cost;
	atomic64_add(cost, &iocg->vtime);

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
{
	if (lock_ioc) {
		spin_lock_irqsave(&iocg->ioc->lock, *flags);
		spin_lock(&iocg->waitq.lock);
	} else {
		spin_lock_irqsave(&iocg->waitq.lock, *flags);
	}
}

static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
{
	if (unlock_ioc) {
		spin_unlock(&iocg->waitq.lock);
		spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
	} else {
		spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
	}
}

#define CREATE_TRACE_POINTS
#include <trace/events/iocost.h>

static void ioc_refresh_margins(struct ioc *ioc)
{
	struct ioc_margins *margins = &ioc->margins;
	u32 period_us = ioc->period_us;
	u64 vrate = ioc->vtime_base_rate;

	margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
	margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
	margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
}

/* latency QoS params changed, update period_us and all the dependent params */
static void ioc_refresh_period_us(struct ioc *ioc)
{
	u32 ppm, lat, multi, period_us;

	lockdep_assert_held(&ioc->lock);

	/* pick the higher latency target */
	if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
		ppm = ioc->params.qos[QOS_RPPM];
		lat = ioc->params.qos[QOS_RLAT];
	} else {
		ppm = ioc->params.qos[QOS_WPPM];
		lat = ioc->params.qos[QOS_WLAT];
	}

	/*
	 * We want the period to be long enough to contain a healthy number
	 * of IOs while short enough for granular control. Define it as a
	 * multiple of the latency target. Ideally, the multiplier should
	 * be scaled according to the percentile so that it would nominally
	 * contain a certain number of requests. Let's be simpler and
	 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
	 */
	if (ppm)
		multi = max_t(u32, (MILLION - ppm) / 50000, 2);
	else
		multi = 2;
	period_us = multi * lat;
	period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);

	/* calculate dependent params */
	ioc->period_us = period_us;
	ioc->timer_slack_ns = div64_u64(
		(u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
		100);
	ioc_refresh_margins(ioc);
}

static int ioc_autop_idx(struct ioc *ioc)
{
	int idx = ioc->autop_idx;
	const struct ioc_params *p = &autop[idx];
	u32 vrate_pct;
	u64 now_ns;

	/* rotational? */
	if (!blk_queue_nonrot(ioc->rqos.q))
		return AUTOP_HDD;

	/* handle SATA SSDs w/ broken NCQ */
	if (blk_queue_depth(ioc->rqos.q) == 1)
		return AUTOP_SSD_QD1;

	/* use one of the normal ssd sets */
	if (idx < AUTOP_SSD_DFL)
		return AUTOP_SSD_DFL;

	/* if user is overriding anything, maintain what was there */
	if (ioc->user_qos_params || ioc->user_cost_model)
		return idx;

	/* step up/down based on the vrate */
	vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
	now_ns = ktime_get_ns();

	if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
		if (!ioc->autop_too_fast_at)
			ioc->autop_too_fast_at = now_ns;
		if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
			return idx + 1;
	} else {
		ioc->autop_too_fast_at = 0;
	}

	if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
		if (!ioc->autop_too_slow_at)
			ioc->autop_too_slow_at = now_ns;
		if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
			return idx - 1;
	} else {
		ioc->autop_too_slow_at = 0;
	}

	return idx;
}

/*
 * Take the following as input
 *
 *  @bps	maximum sequential throughput
 *  @seqiops	maximum sequential 4k iops
 *  @randiops	maximum random 4k iops
 *
 * and calculate the linear model cost coefficients.
 *
 *  *@page	per-page cost		1s / (@bps / 4096)
 *  *@seqio	base cost of a seq IO	max((1s / @seqiops) - *@page, 0)
 *  *@randio	base cost of a rand IO	max((1s / @randiops) - *@page, 0)
 */
static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
			u64 *page, u64 *seqio, u64 *randio)
{
	u64 v;

	*page = *seqio = *randio = 0;

	if (bps)
		*page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
					   DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));

	if (seqiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
		if (v > *page)
			*seqio = v - *page;
	}

	if (randiops) {
		v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
		if (v > *page)
			*randio = v - *page;
	}
}
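
/*
 * Worked example with hypothetical device parameters: @bps = 400 MiB/s
 * gives *@page = 2^37 / (400 MiB / 4096) ~= 1.34M vtime per page;
 * @seqiops = 100k gives *@seqio = 2^37 / 100k - *@page ~= 32k; and
 * @randiops = 10k gives *@randio = 2^37 / 10k - *@page ~= 12.4M, making
 * the random-IO base cost roughly 385 times the sequential base cost on
 * such a device.
 */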

static void ioc_refresh_lcoefs(struct ioc *ioc)
{
	u64 *u = ioc->params.i_lcoefs;
	u64 *c = ioc->params.lcoefs;

	calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		    &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
	calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
		    &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
}

static bool ioc_refresh_params(struct ioc *ioc, bool force)
{
	const struct ioc_params *p;
	int idx;

	lockdep_assert_held(&ioc->lock);

	idx = ioc_autop_idx(ioc);
	p = &autop[idx];

	if (idx == ioc->autop_idx && !force)
		return false;

	if (idx != ioc->autop_idx)
		atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);

	ioc->autop_idx = idx;
	ioc->autop_too_fast_at = 0;
	ioc->autop_too_slow_at = 0;

	if (!ioc->user_qos_params)
		memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
	if (!ioc->user_cost_model)
		memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));

	ioc_refresh_period_us(ioc);
	ioc_refresh_lcoefs(ioc);

	ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
					    VTIME_PER_USEC, MILLION);
	ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
				   VTIME_PER_USEC, MILLION);

	return true;
}

/*
 * When an iocg accumulates too much vtime or gets deactivated, we throw away
 * some vtime, which lowers the overall device utilization. As the exact amount
 * which is being thrown away is known, we can compensate by accelerating the
 * vrate accordingly so that the extra vtime generated in the current period
 * matches what got lost.
 */
static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
{
	s64 pleft = ioc->period_at + ioc->period_us - now->now;
	s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
	s64 vcomp, vcomp_min, vcomp_max;

	lockdep_assert_held(&ioc->lock);

	/* we need some time left in this period */
	if (pleft <= 0)
		goto done;

	/*
	 * Calculate how much vrate should be adjusted to offset the error.
	 * Limit the amount of adjustment and deduct the adjusted amount from
	 * the error.
	 */
	vcomp = -div64_s64(ioc->vtime_err, pleft);
	vcomp_min = -(ioc->vtime_base_rate >> 1);
	vcomp_max = ioc->vtime_base_rate;
	vcomp = clamp(vcomp, vcomp_min, vcomp_max);

	ioc->vtime_err += vcomp * pleft;

	atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
done:
	/* bound how much error can accumulate */
	ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
}

static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
				  int nr_lagging, int nr_shortages,
				  int prev_busy_level, u32 *missed_ppm)
{
	u64 vrate = ioc->vtime_base_rate;
	u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;

	if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
		if (ioc->busy_level != prev_busy_level || nr_lagging)
			trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
						   missed_ppm, rq_wait_pct,
						   nr_lagging, nr_shortages);

		return;
	}

	/*
	 * If vrate is out of bounds, apply the clamp gradually as the
	 * bounds can change abruptly. Otherwise, apply the busy_level
	 * based adjustment.
	 */
	if (vrate < vrate_min) {
		vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
		vrate = min(vrate, vrate_min);
	} else if (vrate > vrate_max) {
		vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
		vrate = max(vrate, vrate_max);
	} else {
		int idx = min_t(int, abs(ioc->busy_level),
				ARRAY_SIZE(vrate_adj_pct) - 1);
		u32 adj_pct = vrate_adj_pct[idx];

		if (ioc->busy_level > 0)
			adj_pct = 100 - adj_pct;
		else
			adj_pct = 100 + adj_pct;

		vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
			      vrate_min, vrate_max);
	}

	trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
				   nr_lagging, nr_shortages);

	ioc->vtime_base_rate = vrate;
	ioc_refresh_margins(ioc);
}

/* take a snapshot of the current [v]time and vrate */
static void ioc_now(struct ioc *ioc, struct ioc_now *now)
{
	unsigned seq;

	now->now_ns = ktime_get();
	now->now = ktime_to_us(now->now_ns);
	now->vrate = atomic64_read(&ioc->vtime_rate);

	/*
	 * The current vtime is
	 *
	 *   vtime at period start + (wallclock time since the start) * vrate
	 *
	 * As a consistent snapshot of `period_at_vtime` and `period_at` is
	 * needed, they're seqcount protected.
	 */
	do {
		seq = read_seqcount_begin(&ioc->period_seqcount);
		now->vnow = ioc->period_at_vtime +
			(now->now - ioc->period_at) * now->vrate;
	} while (read_seqcount_retry(&ioc->period_seqcount, seq));
}

static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
{
	WARN_ON_ONCE(ioc->running != IOC_RUNNING);

	write_seqcount_begin(&ioc->period_seqcount);
	ioc->period_at = now->now;
	ioc->period_at_vtime = now->vnow;
	write_seqcount_end(&ioc->period_seqcount);

	ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
	add_timer(&ioc->timer);
}

/*
 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
 * weight sums and propagate upwards accordingly. If @save, the current
 * margin is saved to be used as reference for later inuse in-period
 * adjustments.
 */
static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
				bool save, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;

	lockdep_assert_held(&ioc->lock);

	inuse = clamp_t(u32, inuse, 1, active);

	iocg->last_inuse = iocg->inuse;
	if (save)
		iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);

	if (active == iocg->active && inuse == iocg->inuse)
		return;

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u32 parent_active = 0, parent_inuse = 0;

		/* update the level sums */
		parent->child_active_sum += (s32)(active - child->active);
		parent->child_inuse_sum += (s32)(inuse - child->inuse);
		/* apply the updates */
		child->active = active;
		child->inuse = inuse;

		/*
		 * The delta between the inuse and active sums indicates how
		 * much of the weight is being donated. The parent's inuse
		 * and active should reflect the ratio.
		 */
		if (parent->child_active_sum) {
			parent_active = parent->weight;
			parent_inuse = DIV64_U64_ROUND_UP(
				parent_active * parent->child_inuse_sum,
				parent->child_active_sum);
		}

		/* do we need to keep walking up? */
		if (parent_active == parent->active &&
		    parent_inuse == parent->inuse)
			break;

		active = parent_active;
		inuse = parent_inuse;
	}

	ioc->weights_updated = true;
}
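
/*
 * For example (sketch): if a parent has weight 100 and its children end up
 * with child_active_sum == 200 but child_inuse_sum == 100 after donations,
 * the parent's own inuse is propagated upwards as 100 * 100 / 200 == 50,
 * i.e. the donated fraction is passed up the tree level by level.
 */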

static void commit_weights(struct ioc *ioc)
{
	lockdep_assert_held(&ioc->lock);

	if (ioc->weights_updated) {
		/* paired with rmb in current_hweight(), see there */
		smp_wmb();
		atomic_inc(&ioc->hweight_gen);
		ioc->weights_updated = false;
	}
}

static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
			      bool save, struct ioc_now *now)
{
	__propagate_weights(iocg, active, inuse, save, now);
	commit_weights(iocg->ioc);
}

static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
{
	struct ioc *ioc = iocg->ioc;
	int lvl;
	u32 hwa, hwi;
	int ioc_gen;

	/* hot path - if uptodate, use cached */
	ioc_gen = atomic_read(&ioc->hweight_gen);
	if (ioc_gen == iocg->hweight_gen)
		goto out;

	/*
	 * Paired with wmb in commit_weights(). If we saw the updated
	 * hweight_gen, all the weight updates from __propagate_weights() are
	 * visible too.
	 *
	 * We can race with weight updates during calculation and get it
	 * wrong. However, hweight_gen would have changed and a future
	 * reader will recalculate and we're guaranteed to discard the
	 * wrong result soon.
	 */
	smp_rmb();

	hwa = hwi = WEIGHT_ONE;
	for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];
		u64 active_sum = READ_ONCE(parent->child_active_sum);
		u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
		u32 active = READ_ONCE(child->active);
		u32 inuse = READ_ONCE(child->inuse);

		/* we can race with deactivations and either may read as zero */
		if (!active_sum || !inuse_sum)
			continue;

		active_sum = max_t(u64, active, active_sum);
		hwa = div64_u64((u64)hwa * active, active_sum);

		inuse_sum = max_t(u64, inuse, inuse_sum);
		hwi = div64_u64((u64)hwi * inuse, inuse_sum);
	}

	iocg->hweight_active = max_t(u32, hwa, 1);
	iocg->hweight_inuse = max_t(u32, hwi, 1);
	iocg->hweight_gen = ioc_gen;
out:
	if (hw_activep)
		*hw_activep = iocg->hweight_active;
	if (hw_inusep)
		*hw_inusep = iocg->hweight_inuse;
}
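
/*
 * Using the hierarchy from the comment at the top of the file (A:100 vs
 * B:300 at the first level, A0:100 vs A1:100 below A), A0's hweight_active
 * works out to WEIGHT_ONE * 100/400 * 100/200 == WEIGHT_ONE / 8, i.e.
 * 12.5% - the product of each level's share along the path from the root.
 */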

/*
 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
 * other weights stay unchanged.
 */
static u32 current_hweight_max(struct ioc_gq *iocg)
{
	u32 hwm = WEIGHT_ONE;
	u32 inuse = iocg->active;
	u64 child_inuse_sum;
	int lvl;

	lockdep_assert_held(&iocg->ioc->lock);

	for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
		struct ioc_gq *parent = iocg->ancestors[lvl];
		struct ioc_gq *child = iocg->ancestors[lvl + 1];

		child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
		hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
		inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
					   parent->child_active_sum);
	}

	return max_t(u32, hwm, 1);
}

static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
	u32 weight;

	lockdep_assert_held(&ioc->lock);

	weight = iocg->cfg_weight ?: iocc->dfl_weight;
	if (weight != iocg->weight && iocg->active)
		propagate_weights(iocg, weight, iocg->inuse, true, now);
	iocg->weight = weight;
}

static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	u64 last_period, cur_period;
	u64 vtime, vtarget;
	int i;

	/*
	 * If we seem to be already active, just update the stamp to tell the
	 * timer that we're still active. We don't mind occasional races.
	 */
	if (!list_empty(&iocg->active_list)) {
		ioc_now(ioc, now);
		cur_period = atomic64_read(&ioc->cur_period);
		if (atomic64_read(&iocg->active_period) != cur_period)
			atomic64_set(&iocg->active_period, cur_period);
		return true;
	}

	/* racy check on internal node IOs, treat as root level IOs */
	if (iocg->child_active_sum)
		return false;

	spin_lock_irq(&ioc->lock);

	ioc_now(ioc, now);

	/* update period */
	cur_period = atomic64_read(&ioc->cur_period);
	last_period = atomic64_read(&iocg->active_period);
	atomic64_set(&iocg->active_period, cur_period);

	/* already activated or breaking leaf-only constraint? */
	if (!list_empty(&iocg->active_list))
		goto succeed_unlock;
	for (i = iocg->level - 1; i > 0; i--)
		if (!list_empty(&iocg->ancestors[i]->active_list))
			goto fail_unlock;

	if (iocg->child_active_sum)
		goto fail_unlock;

	/*
	 * Always start with the target budget. On deactivation, we throw away
	 * anything above it.
	 */
	vtarget = now->vnow - ioc->margins.target;
	vtime = atomic64_read(&iocg->vtime);

	atomic64_add(vtarget - vtime, &iocg->vtime);
	atomic64_add(vtarget - vtime, &iocg->done_vtime);
	vtime = vtarget;

	/*
	 * Activate, propagate weight and start period timer if not
	 * running. Reset hweight_gen to avoid accidental match from
	 * wrapping.
	 */
	iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
	list_add(&iocg->active_list, &ioc->active_iocgs);

	propagate_weights(iocg, iocg->weight,
			  iocg->last_inuse ?: iocg->weight, true, now);

	TRACE_IOCG_PATH(iocg_activate, iocg, now,
			last_period, cur_period, vtime);

	iocg->activated_at = now->now;

	if (ioc->running == IOC_IDLE) {
		ioc->running = IOC_RUNNING;
		ioc->dfgv_period_at = now->now;
		ioc->dfgv_period_rem = 0;
		ioc_start_period(ioc, now);
	}

succeed_unlock:
	spin_unlock_irq(&ioc->lock);
	return true;

fail_unlock:
	spin_unlock_irq(&ioc->lock);
	return false;
}

static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
	u64 tdelta, delay, new_delay;
	s64 vover, vover_pct;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	/* calculate the current delay in effect - 1/2 every second */
	tdelta = now->now - iocg->delay_at;
	if (iocg->delay)
		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
	else
		delay = 0;

	/* calculate the new delay from the debt amount */
	current_hweight(iocg, &hwa, NULL);
	vover = atomic64_read(&iocg->vtime) +
		abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
	vover_pct = div64_s64(100 * vover,
			      ioc->period_us * ioc->vtime_base_rate);

	if (vover_pct <= MIN_DELAY_THR_PCT)
		new_delay = 0;
	else if (vover_pct >= MAX_DELAY_THR_PCT)
		new_delay = MAX_DELAY;
	else
		new_delay = MIN_DELAY +
			div_u64((MAX_DELAY - MIN_DELAY) *
				(vover_pct - MIN_DELAY_THR_PCT),
				MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);

	/* pick the higher one and apply */
	if (new_delay > delay) {
		iocg->delay = new_delay;
		iocg->delay_at = now->now;
		delay = new_delay;
	}

	if (delay >= MIN_DELAY) {
		if (!iocg->indelay_since)
			iocg->indelay_since = now->now;
		blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
		return true;
	} else {
		if (iocg->indelay_since) {
			iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
			iocg->indelay_since = 0;
		}
		iocg->delay = 0;
		blkcg_clear_delay(blkg);
		return false;
	}
}
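
/*
 * Worked example: with MIN/MAX_DELAY_THR_PCT of 500/25000 and MIN/MAX_DELAY
 * of 250us/250ms, a debt of 15 periods' worth of vtime (vover_pct == 1500)
 * interpolates to roughly 250us + 249.75ms * 1000 / 24500 ~= 10.4ms of
 * delay, which then halves each second while the debt isn't growing.
 */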

static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
			    struct ioc_now *now)
{
	struct iocg_pcpu_stat *gcs;

	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);
	WARN_ON_ONCE(list_empty(&iocg->active_list));

	/*
	 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
	 * inuse, donating all of its share to others until its debt is paid
	 * off.
	 */
	if (!iocg->abs_vdebt && abs_cost) {
		iocg->indebt_since = now->now;
		propagate_weights(iocg, iocg->active, 0, false, now);
	}

	iocg->abs_vdebt += abs_cost;

	gcs = get_cpu_ptr(iocg->pcpu_stat);
	local64_add(abs_cost, &gcs->abs_vusage);
	put_cpu_ptr(gcs);
}

static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
			  struct ioc_now *now)
{
	lockdep_assert_held(&iocg->ioc->lock);
	lockdep_assert_held(&iocg->waitq.lock);

	/* make sure that nobody messed with @iocg */
	WARN_ON_ONCE(list_empty(&iocg->active_list));
	WARN_ON_ONCE(iocg->inuse > 1);

	iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);

	/* if debt is paid in full, restore inuse */
	if (!iocg->abs_vdebt) {
		iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
		iocg->indebt_since = 0;

		propagate_weights(iocg, iocg->active, iocg->last_inuse,
				  false, now);
	}
}

static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
			int flags, void *key)
{
	struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
	struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
	u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);

	ctx->vbudget -= cost;

	if (ctx->vbudget < 0)
		return -1;

	iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);

	/*
	 * autoremove_wake_function() removes the wait entry only when it
	 * actually changed the task state. We want the wait always
	 * removed. Remove explicitly and use default_wake_function().
	 */
	list_del_init(&wq_entry->entry);
	wait->committed = true;

	default_wake_function(wq_entry, mode, flags, key);
	return 0;
}

/*
 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock
 * in addition to iocg->waitq.lock.
 */
static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
			    struct ioc_now *now)
{
	struct ioc *ioc = iocg->ioc;
	struct iocg_wake_ctx ctx = { .iocg = iocg };
	u64 vshortage, expires, oexpires;
	s64 vbudget;
	u32 hwa;

	lockdep_assert_held(&iocg->waitq.lock);

	current_hweight(iocg, &hwa, NULL);
	vbudget = now->vnow - atomic64_read(&iocg->vtime);

	/* pay off debt */
	if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
		u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
		u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
		u64 vpay = abs_cost_to_cost(abs_vpay, hwa);

		lockdep_assert_held(&ioc->lock);

		atomic64_add(vpay, &iocg->vtime);
		atomic64_add(vpay, &iocg->done_vtime);
		iocg_pay_debt(iocg, abs_vpay, now);
		vbudget -= vpay;
	}

	if (iocg->abs_vdebt || iocg->delay)
		iocg_kick_delay(iocg, now);

	/*
	 * Debt can still be outstanding if we haven't paid all yet or the
	 * caller raced and called without @pay_debt. Shouldn't wake up waiters
	 * under debt. Make sure @vbudget reflects the outstanding amount and
	 * is not positive.
	 */
	if (iocg->abs_vdebt) {
		s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
		vbudget = min_t(s64, 0, vbudget - vdebt);
	}

	/*
	 * Wake up the ones which are due and see how much vtime we'll need for
	 * the next one. As paying off debt restores hw_inuse, it must be read
	 * after the above debt payment.
	 */
	ctx.vbudget = vbudget;
	current_hweight(iocg, NULL, &ctx.hw_inuse);

	__wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);

	if (!waitqueue_active(&iocg->waitq)) {
		if (iocg->wait_since) {
			iocg->local_stat.wait_us += now->now - iocg->wait_since;
			iocg->wait_since = 0;
		}
		return;
	}

	if (!iocg->wait_since)
		iocg->wait_since = now->now;

	if (WARN_ON_ONCE(ctx.vbudget >= 0))
		return;

	/* determine next wakeup, add a timer margin to guarantee chunking */
	vshortage = -ctx.vbudget;
	expires = now->now_ns +
		DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
		NSEC_PER_USEC;
	expires += ioc->timer_slack_ns;

	/* if already active and close enough, don't bother */
	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
	if (hrtimer_is_queued(&iocg->waitq_timer) &&
	    abs(oexpires - expires) <= ioc->timer_slack_ns)
		return;

	hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
			       ioc->timer_slack_ns, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
{
	struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
	bool pay_debt = READ_ONCE(iocg->abs_vdebt);
	struct ioc_now now;
	unsigned long flags;

	ioc_now(iocg->ioc, &now);

	iocg_lock(iocg, pay_debt, &flags);
	iocg_kick_waitq(iocg, pay_debt, &now);
	iocg_unlock(iocg, pay_debt, &flags);

	return HRTIMER_NORESTART;
}

static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
{
	u32 nr_met[2] = { };
	u32 nr_missed[2] = { };
	u64 rq_wait_ns = 0;
	int cpu, rw;

	for_each_online_cpu(cpu) {
		struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
		u64 this_rq_wait_ns;

		for (rw = READ; rw <= WRITE; rw++) {
			u32 this_met = local_read(&stat->missed[rw].nr_met);
			u32 this_missed = local_read(&stat->missed[rw].nr_missed);

			nr_met[rw] += this_met - stat->missed[rw].last_met;
			nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
			stat->missed[rw].last_met = this_met;
			stat->missed[rw].last_missed = this_missed;
		}

		this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
		rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
		stat->last_rq_wait_ns = this_rq_wait_ns;
	}

	for (rw = READ; rw <= WRITE; rw++) {
		if (nr_met[rw] + nr_missed[rw])
			missed_ppm_ar[rw] =
				DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
						   nr_met[rw] + nr_missed[rw]);
		else
			missed_ppm_ar[rw] = 0;
	}

	*rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
				   ioc->period_us * NSEC_PER_USEC);
}

/* was iocg idle this period? */
static bool iocg_is_idle(struct ioc_gq *iocg)
1591{
1592 struct ioc *ioc = iocg->ioc;
1593
1594 /* did something get issued this period? */
1595 if (atomic64_read(&iocg->active_period) ==
1596 atomic64_read(&ioc->cur_period))
1597 return false;
1598
1599 /* is something in flight? */
Tejun Heodcd65892020-03-10 13:07:46 -04001600 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
Tejun Heo7caa4712019-08-28 15:05:58 -07001601 return false;
1602
1603 return true;
1604}
1605
Tejun Heo97eb1972020-09-01 14:52:43 -04001606/*
1607 * Call this function on each target leaf @iocg to build a pre-order traversal
1608 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1609 * ->walk_list and the caller is responsible for dissolving the list after use.
1610 */
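/*
 * For illustration, take a hypothetical hierarchy R -> X -> {A, B} (not
 * from the source): calling this on leaf A appends [R, X] to @inner_walk
 * in pre-order; a following call on leaf B finds X already linked on a
 * ->walk_list and appends nothing, leaving @inner_walk as [R, X].
 */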
1611static void iocg_build_inner_walk(struct ioc_gq *iocg,
1612 struct list_head *inner_walk)
1613{
1614 int lvl;
1615
1616 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1617
1618 /* find the first ancestor which hasn't been visited yet */
1619 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1620 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1621 break;
1622 }
1623
1624 /* walk down and visit the inner nodes to get pre-order traversal */
1625 while (++lvl <= iocg->level - 1) {
1626 struct ioc_gq *inner = iocg->ancestors[lvl];
1627
1628 /* record traversal order */
1629 list_add_tail(&inner->walk_list, inner_walk);
1630 }
1631}
1632
1633/* collect per-cpu counters and propagate the deltas to the parent */
1634static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1635{
Tejun Heoac33e912020-09-01 14:52:54 -04001636 struct ioc *ioc = iocg->ioc;
Tejun Heo97eb1972020-09-01 14:52:43 -04001637 struct iocg_stat new_stat;
1638 u64 abs_vusage = 0;
1639 u64 vusage_delta;
1640 int cpu;
1641
1642 lockdep_assert_held(&iocg->ioc->lock);
1643
1644 /* collect per-cpu counters */
1645 for_each_possible_cpu(cpu) {
1646 abs_vusage += local64_read(
1647 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1648 }
1649 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1650 iocg->last_stat_abs_vusage = abs_vusage;
1651
Tejun Heoac33e912020-09-01 14:52:54 -04001652 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
Tejun Heo1aa50d02020-09-01 14:52:44 -04001653 iocg->local_stat.usage_us += iocg->usage_delta_us;
Tejun Heo97eb1972020-09-01 14:52:43 -04001654
Tejun Heof0bf84a2020-09-01 14:52:56 -04001655 /* propagate upwards */
Tejun Heo97eb1972020-09-01 14:52:43 -04001656 new_stat.usage_us =
1657 iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
Tejun Heof0bf84a2020-09-01 14:52:56 -04001658 new_stat.wait_us =
1659 iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
1660 new_stat.indebt_us =
1661 iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
1662 new_stat.indelay_us =
1663 iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
Tejun Heo97eb1972020-09-01 14:52:43 -04001664
1665 /* propagate the deltas to the parent */
1666 if (iocg->level > 0) {
1667 struct iocg_stat *parent_stat =
1668 &iocg->ancestors[iocg->level - 1]->desc_stat;
1669
1670 parent_stat->usage_us +=
1671 new_stat.usage_us - iocg->last_stat.usage_us;
Tejun Heof0bf84a2020-09-01 14:52:56 -04001672 parent_stat->wait_us +=
1673 new_stat.wait_us - iocg->last_stat.wait_us;
1674 parent_stat->indebt_us +=
1675 new_stat.indebt_us - iocg->last_stat.indebt_us;
1676 parent_stat->indelay_us +=
1677 new_stat.indelay_us - iocg->last_stat.indelay_us;
Tejun Heo97eb1972020-09-01 14:52:43 -04001678 }
1679
1680 iocg->last_stat = new_stat;
1681}
1682
1683/* get stat counters ready for reading on all active iocgs */
1684static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1685{
1686 LIST_HEAD(inner_walk);
1687 struct ioc_gq *iocg, *tiocg;
1688
1689 /* flush leaves and build inner node walk list */
1690 list_for_each_entry(iocg, target_iocgs, active_list) {
1691 iocg_flush_stat_one(iocg, now);
1692 iocg_build_inner_walk(iocg, &inner_walk);
1693 }
1694
1695 /* keep flushing upwards by walking the inner list backwards */
1696 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1697 iocg_flush_stat_one(iocg, now);
1698 list_del_init(&iocg->walk_list);
1699 }
1700}
1701
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001702/*
1703 * Determine what @iocg's hweight_inuse should be after donating unused
1704 * capacity. @hwm is the upper bound and used to signal no donation. This
1705 * function also throws away @iocg's excess budget.
1706 */
Tejun Heoac33e912020-09-01 14:52:54 -04001707static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1708 u32 usage, struct ioc_now *now)
Tejun Heo7caa4712019-08-28 15:05:58 -07001709{
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001710 struct ioc *ioc = iocg->ioc;
1711 u64 vtime = atomic64_read(&iocg->vtime);
Tejun Heof1de2432020-09-01 14:52:49 -04001712 s64 excess, delta, target, new_hwi;
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001713
Tejun Heoc421a3e2020-09-01 14:52:51 -04001714 /* debt handling owns inuse for debtors */
1715 if (iocg->abs_vdebt)
1716 return 1;
1717
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001718 /* see whether minimum margin requirement is met */
1719 if (waitqueue_active(&iocg->waitq) ||
1720 time_after64(vtime, now->vnow - ioc->margins.min))
1721 return hwm;
1722
Tejun Heoac33e912020-09-01 14:52:54 -04001723 /* throw away excess above target */
1724 excess = now->vnow - vtime - ioc->margins.target;
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001725 if (excess > 0) {
1726 atomic64_add(excess, &iocg->vtime);
1727 atomic64_add(excess, &iocg->done_vtime);
1728 vtime += excess;
Tejun Heoac33e912020-09-01 14:52:54 -04001729 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001730 }
1731
Tejun Heof1de2432020-09-01 14:52:49 -04001732 /*
1733	 * Let delta be the distance between the iocg's and the device's vtimes
1734	 * as a fraction of the period duration. Assuming that the iocg will
1735 * consume the usage determined above, we want to determine new_hwi so
1736 * that delta equals MARGIN_TARGET at the end of the next period.
1737 *
1738 * We need to execute usage worth of IOs while spending the sum of the
1739 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1740 * (delta):
1741 *
1742 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1743 *
1744 * Therefore, the new_hwi is:
1745 *
1746 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
1747 */
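	/*
	 * Numeric sketch with illustrative values, taking MARGIN_TARGET as
	 * 50%: if the iocg trails the device by half a period (delta = 0.5)
	 * and its measured usage was 40%, then
	 *
	 *	new_hwi = 0.4 / (1 - 0.5 + 0.5) = 0.4
	 *
	 * i.e. inuse shrinks toward the observed usage. The code below does
	 * the same math in fixed point scaled by WEIGHT_ONE.
	 */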
1748 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1749 now->vnow - ioc->period_at_vtime);
1750 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1751 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
Tejun Heo7caa4712019-08-28 15:05:58 -07001752
Tejun Heof1de2432020-09-01 14:52:49 -04001753 return clamp_t(s64, new_hwi, 1, hwm);
Tejun Heo7caa4712019-08-28 15:05:58 -07001754}
1755
Tejun Heoe08d02a2020-09-01 14:52:48 -04001756/*
1757 * For work-conservation, an iocg which isn't using all of its share should
1758 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1759 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1760 *
1761 * #1 is mathematically simpler but has the drawback of requiring synchronous
1762 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1763 * change due to donation snapbacks as it has the possibility of grossly
1764 * overshooting what's allowed by the model and vrate.
1765 *
1766 * #2 is inherently safe with local operations. The donating iocg can easily
1767 * snap back to higher weights when needed without worrying about impacts on
1768 * other nodes as the impacts will be inherently correct. This also makes idle
1769 * iocg activations safe. The only effect activations have is decreasing
1770 * hweight_inuse of others, the right solution to which is for those iocgs to
1771 * snap back to higher weights.
1772 *
1773 * So, we go with #2. The challenge is calculating how each donating iocg's
1774 * inuse should be adjusted to achieve the target donation amounts. This is done
1775 * using Andy's method described in the following pdf.
1776 *
1777 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1778 *
1779 * Given the weights and target after-donation hweight_inuse values, Andy's
1780 * method determines how the proportional distribution should look like at each
1781 * sibling level to maintain the relative relationship between all non-donating
1782 * pairs. To roughly summarize, it divides the tree into donating and
1783 * non-donating parts, calculates global donation rate which is used to
1784 * determine the target hweight_inuse for each node, and then derives per-level
1785 * proportions.
1786 *
1787 * The following pdf shows that global distribution calculated this way can be
1788 * achieved by scaling inuse weights of donating leaves and propagating the
1789 * adjustments upwards proportionally.
1790 *
1791 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1792 *
1793 * Combining the above two, we can determine how each leaf iocg's inuse should
1794 * be adjusted to achieve the target donation.
1795 *
1796 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1797 *
1798 * The inline comments use symbols from the last pdf.
1799 *
1800 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1801 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1802 * t is the sum of the absolute budgets of donating nodes in the subtree.
1803 * w is the weight of the node. w = w_f + w_t
1804 * w_f is the non-donating portion of w. w_f = w * f / b
1805 * w_t is the donating portion of w. w_t = w * t / b
1806 * s is the sum of all sibling weights. s = Sum(w) for siblings
1807 * s_f and s_t are the non-donating and donating portions of s.
1808 *
1809 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1810 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1811 * after adjustments. Subscript r denotes the root node's values.
1812 */
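/*
 * To make the gamma formula concrete with invented numbers: if the
 * donating nodes currently hold 40% of the root budget (t_r = 0.4) and
 * should keep 10% after donation (t_r' = 0.1), then
 *
 *	gamma = (1 - 0.1) / (1 - 0.4) = 1.5
 *
 * and every non-donating budget is scaled up by 1.5x, exactly absorbing
 * the freed 30%.
 */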
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001813static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1814{
Tejun Heoe08d02a2020-09-01 14:52:48 -04001815 LIST_HEAD(over_hwa);
1816 LIST_HEAD(inner_walk);
1817 struct ioc_gq *iocg, *tiocg, *root_iocg;
1818 u32 after_sum, over_sum, over_target, gamma;
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001819
Tejun Heoe08d02a2020-09-01 14:52:48 -04001820 /*
1821 * It's pretty unlikely but possible for the total sum of
1822	 * hweight_after_donation values to be higher than WEIGHT_ONE, which will
1823	 * confuse the following calculations. If such a condition is detected,
1824	 * scale down everyone over their full share equally to keep the sum below
1825 * WEIGHT_ONE.
1826 */
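	/*
	 * Illustrative numbers: with WEIGHT_ONE == 65536, an after_sum of
	 * 70000 gives over_delta == 70000 - 65535 == 4465, and each iocg on
	 * the over_hwa list below is scaled by over_target / over_sum to
	 * bring the aggregate back under WEIGHT_ONE.
	 */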
1827 after_sum = 0;
1828 over_sum = 0;
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001829 list_for_each_entry(iocg, surpluses, surplus_list) {
Tejun Heoe08d02a2020-09-01 14:52:48 -04001830 u32 hwa;
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001831
Tejun Heoe08d02a2020-09-01 14:52:48 -04001832 current_hweight(iocg, &hwa, NULL);
1833 after_sum += iocg->hweight_after_donation;
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001834
Tejun Heoe08d02a2020-09-01 14:52:48 -04001835 if (iocg->hweight_after_donation > hwa) {
1836 over_sum += iocg->hweight_after_donation;
1837 list_add(&iocg->walk_list, &over_hwa);
1838 }
Tejun Heo93f7d2d2020-09-01 14:52:47 -04001839 }
Tejun Heoe08d02a2020-09-01 14:52:48 -04001840
1841 if (after_sum >= WEIGHT_ONE) {
1842 /*
1843		 * The delta should be deducted from over_sum; calculate the
1844		 * target over_sum value.
1845 */
1846 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1847 WARN_ON_ONCE(over_sum <= over_delta);
1848 over_target = over_sum - over_delta;
1849 } else {
1850 over_target = 0;
1851 }
1852
1853 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1854 if (over_target)
1855 iocg->hweight_after_donation =
1856 div_u64((u64)iocg->hweight_after_donation *
1857 over_target, over_sum);
1858 list_del_init(&iocg->walk_list);
1859 }
1860
1861 /*
1862 * Build pre-order inner node walk list and prepare for donation
1863 * adjustment calculations.
1864 */
1865 list_for_each_entry(iocg, surpluses, surplus_list) {
1866 iocg_build_inner_walk(iocg, &inner_walk);
1867 }
1868
1869 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1870 WARN_ON_ONCE(root_iocg->level > 0);
1871
1872 list_for_each_entry(iocg, &inner_walk, walk_list) {
1873 iocg->child_adjusted_sum = 0;
1874 iocg->hweight_donating = 0;
1875 iocg->hweight_after_donation = 0;
1876 }
1877
1878 /*
1879 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1880 * up the hierarchy.
1881 */
1882 list_for_each_entry(iocg, surpluses, surplus_list) {
1883 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1884
1885 parent->hweight_donating += iocg->hweight_donating;
1886 parent->hweight_after_donation += iocg->hweight_after_donation;
1887 }
1888
1889 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1890 if (iocg->level > 0) {
1891 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1892
1893 parent->hweight_donating += iocg->hweight_donating;
1894 parent->hweight_after_donation += iocg->hweight_after_donation;
1895 }
1896 }
1897
1898 /*
1899 * Calculate inner hwa's (b) and make sure the donation values are
1900 * within the accepted ranges as we're doing low res calculations with
1901 * roundups.
1902 */
1903 list_for_each_entry(iocg, &inner_walk, walk_list) {
1904 if (iocg->level) {
1905 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1906
1907 iocg->hweight_active = DIV64_U64_ROUND_UP(
1908 (u64)parent->hweight_active * iocg->active,
1909 parent->child_active_sum);
1910
1911 }
1912
1913 iocg->hweight_donating = min(iocg->hweight_donating,
1914 iocg->hweight_active);
1915 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1916 iocg->hweight_donating - 1);
1917 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1918 iocg->hweight_donating <= 1 ||
1919 iocg->hweight_after_donation == 0)) {
1920 pr_warn("iocg: invalid donation weights in ");
1921 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1922 pr_cont(": active=%u donating=%u after=%u\n",
1923 iocg->hweight_active, iocg->hweight_donating,
1924 iocg->hweight_after_donation);
1925 }
1926 }
1927
1928 /*
1929 * Calculate the global donation rate (gamma) - the rate to adjust
Tejun Heo769b6282020-09-11 18:40:49 -04001930 * non-donating budgets by.
1931 *
1932 * No need to use 64bit multiplication here as the first operand is
1933 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1934 *
1935 * We know that there are beneficiary nodes and the sum of the donating
1936 * hweights can't be whole; however, due to the round-ups during hweight
1937 * calculations, root_iocg->hweight_donating might still end up equal to
1938 * or greater than whole. Limit the range when calculating the divider.
Tejun Heoe08d02a2020-09-01 14:52:48 -04001939 *
1940 * gamma = (1 - t_r') / (1 - t_r)
1941 */
1942 gamma = DIV_ROUND_UP(
1943 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
Tejun Heo769b6282020-09-11 18:40:49 -04001944 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
Tejun Heoe08d02a2020-09-01 14:52:48 -04001945
1946 /*
1947 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1948 * nodes.
1949 */
1950 list_for_each_entry(iocg, &inner_walk, walk_list) {
1951 struct ioc_gq *parent;
1952 u32 inuse, wpt, wptp;
1953 u64 st, sf;
1954
1955 if (iocg->level == 0) {
1956 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1957 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1958 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1959 WEIGHT_ONE - iocg->hweight_after_donation);
1960 continue;
1961 }
1962
1963 parent = iocg->ancestors[iocg->level - 1];
1964
1965 /* b' = gamma * b_f + b_t' */
1966 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1967 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1968 WEIGHT_ONE) + iocg->hweight_after_donation;
1969
1970 /* w' = s' * b' / b'_p */
1971 inuse = DIV64_U64_ROUND_UP(
1972 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1973 parent->hweight_inuse);
1974
1975 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1976 st = DIV64_U64_ROUND_UP(
1977 iocg->child_active_sum * iocg->hweight_donating,
1978 iocg->hweight_active);
1979 sf = iocg->child_active_sum - st;
1980 wpt = DIV64_U64_ROUND_UP(
1981 (u64)iocg->active * iocg->hweight_donating,
1982 iocg->hweight_active);
1983 wptp = DIV64_U64_ROUND_UP(
1984 (u64)inuse * iocg->hweight_after_donation,
1985 iocg->hweight_inuse);
1986
1987 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
1988 }
1989
1990 /*
1991 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
1992 * we can finally determine leaf adjustments.
1993 */
1994 list_for_each_entry(iocg, surpluses, surplus_list) {
1995 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1996 u32 inuse;
1997
Tejun Heoc421a3e2020-09-01 14:52:51 -04001998 /*
1999 * In-debt iocgs participated in the donation calculation with
2000 * the minimum target hweight_inuse. Configuring inuse
2001		 * accordingly would work fine but debt handling expects
2002		 * @iocg->inuse to stay at the minimum and we don't want to
2003		 * interfere.
2004 */
2005 if (iocg->abs_vdebt) {
2006 WARN_ON_ONCE(iocg->inuse > 1);
2007 continue;
2008 }
2009
Tejun Heoe08d02a2020-09-01 14:52:48 -04002010 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2011 inuse = DIV64_U64_ROUND_UP(
2012 parent->child_adjusted_sum * iocg->hweight_after_donation,
2013 parent->hweight_inuse);
Tejun Heo04603752020-09-01 14:52:55 -04002014
2015 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2016 iocg->inuse, inuse,
2017 iocg->hweight_inuse,
2018 iocg->hweight_after_donation);
2019
Tejun Heob0853ab2020-09-01 14:52:50 -04002020 __propagate_weights(iocg, iocg->active, inuse, true, now);
Tejun Heoe08d02a2020-09-01 14:52:48 -04002021 }
2022
2023 /* walk list should be dissolved after use */
2024 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2025 list_del_init(&iocg->walk_list);
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002026}
2027
Tejun Heoab8df822020-09-17 20:44:52 -04002028/*
2029 * A low weight iocg can amass a large amount of debt, for example, when
2030 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2031 * memory paired with a slow IO device, the debt can span multiple seconds or
2032 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2033 * up blocked paying its debt while the IO device is idle.
2034 *
2035 * The following protects against such cases. If the device has been
Tejun Heod9517842020-09-17 20:44:54 -04002036 * sufficiently idle for a while, the debts are halved and delays are
2037 * recalculated.
Tejun Heoab8df822020-09-17 20:44:52 -04002038 */
2039static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
Tejun Heo33a1fe62020-09-17 20:44:53 -04002040 struct ioc_now *now)
Tejun Heoab8df822020-09-17 20:44:52 -04002041{
Tejun Heoc7af2a02020-09-17 20:44:55 -04002042 struct ioc_gq *iocg;
2043 u64 dur, usage_pct, nr_cycles;
Tejun Heoab8df822020-09-17 20:44:52 -04002044
Tejun Heoc7af2a02020-09-17 20:44:55 -04002045 /* if no debtor, reset the cycle */
2046 if (!nr_debtors) {
2047 ioc->dfgv_period_at = now->now;
2048 ioc->dfgv_period_rem = 0;
2049 ioc->dfgv_usage_us_sum = 0;
2050 return;
2051 }
Tejun Heoab8df822020-09-17 20:44:52 -04002052
Tejun Heoc7af2a02020-09-17 20:44:55 -04002053 /*
2054	 * Debtors can push a lot of writes through, choking the device, and we
2055	 * don't want to be forgiving debts while the device is struggling with
2056	 * write bursts.
2057 * fully utilized.
2058 */
2059 if (ioc->busy_level > 0)
2060 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2061
2062 ioc->dfgv_usage_us_sum += usage_us_sum;
2063 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2064 return;
2065
2066 /*
2067 * At least DFGV_PERIOD has passed since the last period. Calculate the
2068 * average usage and reset the period counters.
2069 */
2070 dur = now->now - ioc->dfgv_period_at;
2071 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2072
2073 ioc->dfgv_period_at = now->now;
2074 ioc->dfgv_usage_us_sum = 0;
2075
2076 /* if was too busy, reset everything */
2077 if (usage_pct > DFGV_USAGE_PCT) {
2078 ioc->dfgv_period_rem = 0;
2079 return;
2080 }
2081
2082 /*
2083 * Usage is lower than threshold. Let's forgive some debts. Debt
2084 * forgiveness runs off of the usual ioc timer but its period usually
2085 * doesn't match ioc's. Compensate the difference by performing the
2086 * reduction as many times as would fit in the duration since the last
2087 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2088 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2089 * reductions is doubled.
2090 */
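	/*
	 * For example (values made up): with nr_cycles == 2, an abs_vdebt of
	 * 1000 becomes 1000 >> 2 == 250 below, and a debt that would shift
	 * down to 0 is held at 1 by the "?: 1" fallback so the iocg remains
	 * marked as indebted until the remainder is handled.
	 */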
2091 nr_cycles = dur + ioc->dfgv_period_rem;
2092 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2093
2094 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
Tejun Heoc5a65612020-09-17 20:44:56 -04002095 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2096
Tejun Heobec02db2020-09-18 14:41:27 -04002097 if (!iocg->abs_vdebt && !iocg->delay)
Tejun Heoc7af2a02020-09-17 20:44:55 -04002098 continue;
Tejun Heoc5a65612020-09-17 20:44:56 -04002099
Tejun Heoc7af2a02020-09-17 20:44:55 -04002100 spin_lock(&iocg->waitq.lock);
Tejun Heoc5a65612020-09-17 20:44:56 -04002101
2102 old_debt = iocg->abs_vdebt;
2103 old_delay = iocg->delay;
2104
Tejun Heobec02db2020-09-18 14:41:27 -04002105 if (iocg->abs_vdebt)
2106 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2107 if (iocg->delay)
2108 iocg->delay = iocg->delay >> nr_cycles ?: 1;
2109
Tejun Heoc7af2a02020-09-17 20:44:55 -04002110 iocg_kick_waitq(iocg, true, now);
Tejun Heoc5a65612020-09-17 20:44:56 -04002111
2112 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2113 old_debt, iocg->abs_vdebt,
2114 old_delay, iocg->delay);
2115
Tejun Heoc7af2a02020-09-17 20:44:55 -04002116 spin_unlock(&iocg->waitq.lock);
Tejun Heoab8df822020-09-17 20:44:52 -04002117 }
2118}
2119
Baolin Wang24747872020-11-26 16:16:14 +08002120/*
2121 * Check the active iocgs' state to avoid oversleeping and deactivate
2122 * idle iocgs.
2123 *
2124 * Since waiters determine the sleep durations based on the vrate
2125 * they saw at the time of sleep, if vrate has increased, some
2126 * waiters could be sleeping for too long. Wake up tardy waiters
2127 * which should have woken up in the last period and expire idle
2128 * iocgs.
2129 */
2130static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2131{
2132 int nr_debtors = 0;
2133 struct ioc_gq *iocg, *tiocg;
2134
2135 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2136 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2137 !iocg->delay && !iocg_is_idle(iocg))
2138 continue;
2139
2140 spin_lock(&iocg->waitq.lock);
2141
2142 /* flush wait and indebt stat deltas */
2143 if (iocg->wait_since) {
2144 iocg->local_stat.wait_us += now->now - iocg->wait_since;
2145 iocg->wait_since = now->now;
2146 }
2147 if (iocg->indebt_since) {
2148 iocg->local_stat.indebt_us +=
2149 now->now - iocg->indebt_since;
2150 iocg->indebt_since = now->now;
2151 }
2152 if (iocg->indelay_since) {
2153 iocg->local_stat.indelay_us +=
2154 now->now - iocg->indelay_since;
2155 iocg->indelay_since = now->now;
2156 }
2157
2158 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2159 iocg->delay) {
2160 /* might be oversleeping vtime / hweight changes, kick */
2161 iocg_kick_waitq(iocg, true, now);
2162 if (iocg->abs_vdebt || iocg->delay)
2163 nr_debtors++;
2164 } else if (iocg_is_idle(iocg)) {
2165 /* no waiter and idle, deactivate */
2166 u64 vtime = atomic64_read(&iocg->vtime);
2167 s64 excess;
2168
2169 /*
2170 * @iocg has been inactive for a full duration and will
2171 * have a high budget. Account anything above target as
2172			 * error and throw it away. On reactivation, it'll start
2173 * with the target budget.
2174 */
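			/*
			 * Illustrative math: with excess == 1000 vtime units
			 * and old_hwi at a quarter of WEIGHT_ONE (16384 of
			 * 65536), vtime_err drops by 1000 * 16384 / 65536 ==
			 * 250 - the error is discounted by the share the
			 * iocg actually held.
			 */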
2175 excess = now->vnow - vtime - ioc->margins.target;
2176 if (excess > 0) {
2177 u32 old_hwi;
2178
2179 current_hweight(iocg, NULL, &old_hwi);
2180 ioc->vtime_err -= div64_u64(excess * old_hwi,
2181 WEIGHT_ONE);
2182 }
2183
Baolin Wang76efc1c2020-12-10 18:56:44 +08002184 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2185 atomic64_read(&iocg->active_period),
2186 atomic64_read(&ioc->cur_period), vtime);
Baolin Wang24747872020-11-26 16:16:14 +08002187 __propagate_weights(iocg, 0, 0, false, now);
2188 list_del_init(&iocg->active_list);
2189 }
2190
2191 spin_unlock(&iocg->waitq.lock);
2192 }
2193
2194 commit_weights(ioc);
2195 return nr_debtors;
2196}
2197
Tejun Heo7caa4712019-08-28 15:05:58 -07002198static void ioc_timer_fn(struct timer_list *timer)
2199{
2200 struct ioc *ioc = container_of(timer, struct ioc, timer);
2201 struct ioc_gq *iocg, *tiocg;
2202 struct ioc_now now;
Tejun Heo8692d2d2020-09-01 14:52:45 -04002203 LIST_HEAD(surpluses);
Baolin Wang24747872020-11-26 16:16:14 +08002204 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
Tejun Heodda13152020-09-01 14:52:53 -04002205 u64 usage_us_sum = 0;
Tejun Heo7caa4712019-08-28 15:05:58 -07002206 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2207 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2208 u32 missed_ppm[2], rq_wait_pct;
2209 u64 period_vtime;
Tejun Heof1de2432020-09-01 14:52:49 -04002210 int prev_busy_level;
Tejun Heo7caa4712019-08-28 15:05:58 -07002211
2212 /* how were the latencies during the period? */
2213 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2214
2215 /* take care of active iocgs */
2216 spin_lock_irq(&ioc->lock);
2217
2218 ioc_now(ioc, &now);
2219
2220 period_vtime = now.vnow - ioc->period_at_vtime;
2221 if (WARN_ON_ONCE(!period_vtime)) {
2222 spin_unlock_irq(&ioc->lock);
2223 return;
2224 }
2225
Baolin Wang24747872020-11-26 16:16:14 +08002226 nr_debtors = ioc_check_iocgs(ioc, &now);
Tejun Heo7caa4712019-08-28 15:05:58 -07002227
Tejun Heof0bf84a2020-09-01 14:52:56 -04002228 /*
2229 * Wait and indebt stat are flushed above and the donation calculation
2230 * below needs updated usage stat. Let's bring stat up-to-date.
2231 */
2232 iocg_flush_stat(&ioc->active_iocgs, &now);
2233
Tejun Heof1de2432020-09-01 14:52:49 -04002234 /* calc usage and see whether some weights need to be moved around */
Tejun Heo7caa4712019-08-28 15:05:58 -07002235 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
Baolin Wangc09245f2020-11-26 16:16:13 +08002236 u64 vdone, vtime, usage_us;
2237 u32 hw_active, hw_inuse;
Tejun Heo7caa4712019-08-28 15:05:58 -07002238
2239 /*
2240 * Collect unused and wind vtime closer to vnow to prevent
2241 * iocgs from accumulating a large amount of budget.
2242 */
2243 vdone = atomic64_read(&iocg->done_vtime);
2244 vtime = atomic64_read(&iocg->vtime);
2245 current_hweight(iocg, &hw_active, &hw_inuse);
2246
2247 /*
2248 * Latency QoS detection doesn't account for IOs which are
2249 * in-flight for longer than a period. Detect them by
2250 * comparing vdone against period start. If lagging behind
2251 * IOs from past periods, don't increase vrate.
2252 */
Tejun Heo7cd806a2019-09-25 16:03:09 -07002253 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2254 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
Tejun Heo7caa4712019-08-28 15:05:58 -07002255 time_after64(vtime, vdone) &&
2256 time_after64(vtime, now.vnow -
2257 MAX_LAGGING_PERIODS * period_vtime) &&
2258 time_before64(vdone, now.vnow - period_vtime))
2259 nr_lagging++;
2260
Tejun Heo7caa4712019-08-28 15:05:58 -07002261 /*
Tejun Heof1de2432020-09-01 14:52:49 -04002262 * Determine absolute usage factoring in in-flight IOs to avoid
2263 * high-latency completions appearing as idle.
Tejun Heo7caa4712019-08-28 15:05:58 -07002264 */
Tejun Heo1aa50d02020-09-01 14:52:44 -04002265 usage_us = iocg->usage_delta_us;
Tejun Heodda13152020-09-01 14:52:53 -04002266 usage_us_sum += usage_us;
Tejun Heof1de2432020-09-01 14:52:49 -04002267
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002268 /* see whether there's surplus vtime */
2269 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2270 if (hw_inuse < hw_active ||
2271 (!waitqueue_active(&iocg->waitq) &&
Tejun Heof1de2432020-09-01 14:52:49 -04002272 time_before64(vtime, now.vnow - ioc->margins.low))) {
Baolin Wangc09245f2020-11-26 16:16:13 +08002273 u32 hwa, old_hwi, hwm, new_hwi, usage;
2274 u64 usage_dur;
2275
2276 if (vdone != vtime) {
2277 u64 inflight_us = DIV64_U64_ROUND_UP(
2278 cost_to_abs_cost(vtime - vdone, hw_inuse),
2279 ioc->vtime_base_rate);
2280
2281 usage_us = max(usage_us, inflight_us);
2282 }
2283
2284 /* convert to hweight based usage ratio */
2285 if (time_after64(iocg->activated_at, ioc->period_at))
2286 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2287 else
2288 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2289
2290 usage = clamp_t(u32,
2291 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2292 usage_dur),
2293 1, WEIGHT_ONE);
Tejun Heo7caa4712019-08-28 15:05:58 -07002294
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002295 /*
2296 * Already donating or accumulated enough to start.
2297 * Determine the donation amount.
2298 */
Tejun Heoac33e912020-09-01 14:52:54 -04002299 current_hweight(iocg, &hwa, &old_hwi);
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002300 hwm = current_hweight_max(iocg);
Tejun Heoac33e912020-09-01 14:52:54 -04002301 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2302 usage, &now);
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002303 if (new_hwi < hwm) {
Tejun Heoe08d02a2020-09-01 14:52:48 -04002304 iocg->hweight_donating = hwa;
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002305 iocg->hweight_after_donation = new_hwi;
2306 list_add(&iocg->surplus_list, &surpluses);
2307 } else {
Tejun Heo04603752020-09-01 14:52:55 -04002308 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2309 iocg->inuse, iocg->active,
2310 iocg->hweight_inuse, new_hwi);
2311
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002312 __propagate_weights(iocg, iocg->active,
Tejun Heob0853ab2020-09-01 14:52:50 -04002313 iocg->active, true, &now);
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002314 nr_shortages++;
2315 }
2316 } else {
2317 /* genuinely short on vtime */
2318 nr_shortages++;
Tejun Heo7caa4712019-08-28 15:05:58 -07002319 }
2320 }
Tejun Heo93f7d2d2020-09-01 14:52:47 -04002321
2322 if (!list_empty(&surpluses) && nr_shortages)
2323 transfer_surpluses(&surpluses, &now);
2324
Tejun Heo00410f12020-09-01 14:52:34 -04002325 commit_weights(ioc);
Tejun Heo7caa4712019-08-28 15:05:58 -07002326
Tejun Heo8692d2d2020-09-01 14:52:45 -04002327 /* surplus list should be dissolved after use */
2328 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2329 list_del_init(&iocg->surplus_list);
2330
Tejun Heodda13152020-09-01 14:52:53 -04002331 /*
Tejun Heo7caa4712019-08-28 15:05:58 -07002332 * If q is getting clogged or we're missing too much, we're issuing
2333 * too much IO and should lower vtime rate. If we're not missing
2334 * and experiencing shortages but not surpluses, we're too stingy
2335 * and should increase vtime rate.
2336 */
Tejun Heo25d41e42019-09-25 16:02:07 -07002337 prev_busy_level = ioc->busy_level;
Tejun Heo7caa4712019-08-28 15:05:58 -07002338 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2339 missed_ppm[READ] > ppm_rthr ||
2340 missed_ppm[WRITE] > ppm_wthr) {
Tejun Heo81ca6272019-10-14 17:18:11 -07002341 /* clearly missing QoS targets, slow down vrate */
Tejun Heo7caa4712019-08-28 15:05:58 -07002342 ioc->busy_level = max(ioc->busy_level, 0);
2343 ioc->busy_level++;
Tejun Heo7cd806a2019-09-25 16:03:09 -07002344 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
Tejun Heo7caa4712019-08-28 15:05:58 -07002345 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2346 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
Tejun Heo81ca6272019-10-14 17:18:11 -07002347 /* QoS targets are being met with >25% margin */
2348 if (nr_shortages) {
2349 /*
2350 * We're throttling while the device has spare
2351 * capacity. If vrate was being slowed down, stop.
2352 */
Tejun Heo7cd806a2019-09-25 16:03:09 -07002353 ioc->busy_level = min(ioc->busy_level, 0);
Tejun Heo81ca6272019-10-14 17:18:11 -07002354
2355 /*
2356 * If there are IOs spanning multiple periods, wait
Tejun Heo065655c2020-09-01 14:52:46 -04002357 * them out before pushing the device harder.
Tejun Heo81ca6272019-10-14 17:18:11 -07002358 */
Tejun Heo065655c2020-09-01 14:52:46 -04002359 if (!nr_lagging)
Tejun Heo7cd806a2019-09-25 16:03:09 -07002360 ioc->busy_level--;
Tejun Heo81ca6272019-10-14 17:18:11 -07002361 } else {
2362 /*
2363 * Nobody is being throttled and the users aren't
2364 * issuing enough IOs to saturate the device. We
2365 * simply don't know how close the device is to
2366 * saturation. Coast.
2367 */
2368 ioc->busy_level = 0;
Tejun Heo7cd806a2019-09-25 16:03:09 -07002369 }
Tejun Heo7caa4712019-08-28 15:05:58 -07002370 } else {
Tejun Heo81ca6272019-10-14 17:18:11 -07002371		/* inside the hysteresis margin, we're good */
Tejun Heo7caa4712019-08-28 15:05:58 -07002372 ioc->busy_level = 0;
2373 }
2374
2375 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2376
Baolin Wang926f75f2020-11-26 16:16:15 +08002377 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2378 prev_busy_level, missed_ppm);
Tejun Heo7caa4712019-08-28 15:05:58 -07002379
2380 ioc_refresh_params(ioc, false);
2381
Tejun Heo33a1fe62020-09-17 20:44:53 -04002382 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2383
Tejun Heo7caa4712019-08-28 15:05:58 -07002384 /*
2385 * This period is done. Move onto the next one. If nothing's
2386 * going on with the device, stop the timer.
2387 */
2388 atomic64_inc(&ioc->cur_period);
2389
2390 if (ioc->running != IOC_STOP) {
2391 if (!list_empty(&ioc->active_iocgs)) {
2392 ioc_start_period(ioc, &now);
2393 } else {
2394 ioc->busy_level = 0;
Tejun Heoac33e912020-09-01 14:52:54 -04002395 ioc->vtime_err = 0;
Tejun Heo7caa4712019-08-28 15:05:58 -07002396 ioc->running = IOC_IDLE;
2397 }
Tejun Heoac33e912020-09-01 14:52:54 -04002398
2399 ioc_refresh_vrate(ioc, &now);
Tejun Heo7caa4712019-08-28 15:05:58 -07002400 }
2401
2402 spin_unlock_irq(&ioc->lock);
2403}
2404
Tejun Heob0853ab2020-09-01 14:52:50 -04002405static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2406 u64 abs_cost, struct ioc_now *now)
2407{
2408 struct ioc *ioc = iocg->ioc;
2409 struct ioc_margins *margins = &ioc->margins;
Tejun Heo04603752020-09-01 14:52:55 -04002410 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
Tejun Heoaa67db22020-09-14 11:05:13 -04002411 u32 hwi, adj_step;
Tejun Heob0853ab2020-09-01 14:52:50 -04002412 s64 margin;
2413 u64 cost, new_inuse;
2414
2415 current_hweight(iocg, NULL, &hwi);
Tejun Heo04603752020-09-01 14:52:55 -04002416 old_hwi = hwi;
Tejun Heob0853ab2020-09-01 14:52:50 -04002417 cost = abs_cost_to_cost(abs_cost, hwi);
2418 margin = now->vnow - vtime - cost;
2419
Tejun Heoc421a3e2020-09-01 14:52:51 -04002420 /* debt handling owns inuse for debtors */
2421 if (iocg->abs_vdebt)
2422 return cost;
2423
Tejun Heob0853ab2020-09-01 14:52:50 -04002424 /*
Baolin Wang5ba1add22020-11-26 16:16:11 +08002425	 * We only increase inuse during a period and do so if the margin has
Tejun Heob0853ab2020-09-01 14:52:50 -04002426 * deteriorated since the previous adjustment.
2427 */
2428 if (margin >= iocg->saved_margin || margin >= margins->low ||
2429 iocg->inuse == iocg->active)
2430 return cost;
2431
2432 spin_lock_irq(&ioc->lock);
2433
2434 /* we own inuse only when @iocg is in the normal active state */
Tejun Heoc421a3e2020-09-01 14:52:51 -04002435 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
Tejun Heob0853ab2020-09-01 14:52:50 -04002436 spin_unlock_irq(&ioc->lock);
2437 return cost;
2438 }
2439
Tejun Heoaa67db22020-09-14 11:05:13 -04002440 /*
2441 * Bump up inuse till @abs_cost fits in the existing budget.
2442 * adj_step must be determined after acquiring ioc->lock - we might
2443	 * have lost an activation race to another thread and could be
2444	 * reading iocg->active == 0 before taking ioc->lock, which would
2445	 * lead to an infinite loop.
2446 */
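	/*
	 * Rough sketch of the loop below, assuming an INUSE_ADJ_STEP_PCT of
	 * 25 for illustration: an iocg with active == 10000 walks inuse up
	 * in steps of 2500 until the cost recomputed at the resulting
	 * hweight fits the budget or inuse reaches active.
	 */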
Tejun Heob0853ab2020-09-01 14:52:50 -04002447 new_inuse = iocg->inuse;
Tejun Heoaa67db22020-09-14 11:05:13 -04002448 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
Tejun Heob0853ab2020-09-01 14:52:50 -04002449 do {
2450 new_inuse = new_inuse + adj_step;
2451 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2452 current_hweight(iocg, NULL, &hwi);
2453 cost = abs_cost_to_cost(abs_cost, hwi);
2454 } while (time_after64(vtime + cost, now->vnow) &&
2455 iocg->inuse != iocg->active);
2456
2457 spin_unlock_irq(&ioc->lock);
Tejun Heo04603752020-09-01 14:52:55 -04002458
2459 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2460 old_inuse, iocg->inuse, old_hwi, hwi);
2461
Tejun Heob0853ab2020-09-01 14:52:50 -04002462 return cost;
2463}
2464
Tejun Heo7caa4712019-08-28 15:05:58 -07002465static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2466 bool is_merge, u64 *costp)
2467{
2468 struct ioc *ioc = iocg->ioc;
2469 u64 coef_seqio, coef_randio, coef_page;
2470 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2471 u64 seek_pages = 0;
2472 u64 cost = 0;
2473
2474 switch (bio_op(bio)) {
2475 case REQ_OP_READ:
2476 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2477 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2478 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2479 break;
2480 case REQ_OP_WRITE:
2481 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2482 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2483 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2484 break;
2485 default:
2486 goto out;
2487 }
2488
2489 if (iocg->cursor) {
2490 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2491 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2492 }
2493
2494 if (!is_merge) {
2495 if (seek_pages > LCOEF_RANDIO_PAGES) {
2496 cost += coef_randio;
2497 } else {
2498 cost += coef_seqio;
2499 }
2500 }
2501 cost += pages * coef_page;
2502out:
2503 *costp = cost;
2504}
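/*
 * A worked instance of the linear model above, with the coefficients left
 * symbolic and assuming 512-byte sectors and 4KiB pages: a non-merge
 * 64KiB read whose start sector is far from ->cursor spans 128 sectors,
 * i.e. 128 >> IOC_SECT_TO_PAGE_SHIFT == 16 pages, and is charged
 * coef_randio + 16 * coef_page.
 */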
2505
2506static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2507{
2508 u64 cost;
2509
2510 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2511 return cost;
2512}
2513
Tejun Heocd006502020-04-13 12:27:56 -04002514static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2515 u64 *costp)
2516{
2517 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2518
2519 switch (req_op(rq)) {
2520 case REQ_OP_READ:
2521 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2522 break;
2523 case REQ_OP_WRITE:
2524 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2525 break;
2526 default:
2527 *costp = 0;
2528 }
2529}
2530
2531static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2532{
2533 u64 cost;
2534
2535 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2536 return cost;
2537}
2538
Tejun Heo7caa4712019-08-28 15:05:58 -07002539static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2540{
2541 struct blkcg_gq *blkg = bio->bi_blkg;
2542 struct ioc *ioc = rqos_to_ioc(rqos);
2543 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2544 struct ioc_now now;
2545 struct iocg_wait wait;
Tejun Heo7caa4712019-08-28 15:05:58 -07002546 u64 abs_cost, cost, vtime;
Tejun Heoda437b92020-09-01 14:52:42 -04002547 bool use_debt, ioc_locked;
2548 unsigned long flags;
Tejun Heo7caa4712019-08-28 15:05:58 -07002549
Tejun Heod16baa32021-01-05 12:37:23 -05002550 /* bypass IOs if disabled, still initializing, or for root cgroup */
2551 if (!ioc->enabled || !iocg || !iocg->level)
Tejun Heo7caa4712019-08-28 15:05:58 -07002552 return;
2553
Tejun Heo7caa4712019-08-28 15:05:58 -07002554 /* calculate the absolute vtime cost */
2555 abs_cost = calc_vtime_cost(bio, iocg, false);
2556 if (!abs_cost)
2557 return;
2558
Tejun Heof1de2432020-09-01 14:52:49 -04002559 if (!iocg_activate(iocg, &now))
2560 return;
2561
Tejun Heo7caa4712019-08-28 15:05:58 -07002562 iocg->cursor = bio_end_sector(bio);
Tejun Heo7caa4712019-08-28 15:05:58 -07002563 vtime = atomic64_read(&iocg->vtime);
Tejun Heob0853ab2020-09-01 14:52:50 -04002564 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
Tejun Heo7caa4712019-08-28 15:05:58 -07002565
2566 /*
2567 * If no one's waiting and within budget, issue right away. The
2568 * tests are racy but the races aren't systemic - we only miss once
2569 * in a while which is fine.
2570 */
Tejun Heo0b80f982020-05-04 19:27:54 -04002571 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
Tejun Heo7caa4712019-08-28 15:05:58 -07002572 time_before_eq64(vtime + cost, now.vnow)) {
Tejun Heo97eb1972020-09-01 14:52:43 -04002573 iocg_commit_bio(iocg, bio, abs_cost, cost);
Tejun Heo7caa4712019-08-28 15:05:58 -07002574 return;
2575 }
2576
Tejun Heo36a52482019-09-04 12:45:52 -07002577 /*
Tejun Heoda437b92020-09-01 14:52:42 -04002578 * We're over budget. This can be handled in two ways. IOs which may
2579	 * cause priority inversions are charged to @iocg as
2580 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2581 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2582 * whether debt handling is needed and acquire locks accordingly.
Tejun Heo0b80f982020-05-04 19:27:54 -04002583 */
Tejun Heoda437b92020-09-01 14:52:42 -04002584 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2585 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
Tejun Heob0853ab2020-09-01 14:52:50 -04002586retry_lock:
Tejun Heoda437b92020-09-01 14:52:42 -04002587 iocg_lock(iocg, ioc_locked, &flags);
2588
2589 /*
2590 * @iocg must stay activated for debt and waitq handling. Deactivation
2591 * is synchronized against both ioc->lock and waitq.lock and we won't
2592	 * get deactivated as long as we're waiting or have debt, so we're good
2593 * if we're activated here. In the unlikely cases that we aren't, just
2594 * issue the IO.
2595 */
Tejun Heo0b80f982020-05-04 19:27:54 -04002596 if (unlikely(list_empty(&iocg->active_list))) {
Tejun Heoda437b92020-09-01 14:52:42 -04002597 iocg_unlock(iocg, ioc_locked, &flags);
Tejun Heo97eb1972020-09-01 14:52:43 -04002598 iocg_commit_bio(iocg, bio, abs_cost, cost);
Tejun Heo0b80f982020-05-04 19:27:54 -04002599 return;
2600 }
2601
2602 /*
2603 * We're over budget. If @bio has to be issued regardless, remember
2604 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2605 * off the debt before waking more IOs.
2606 *
Tejun Heo36a52482019-09-04 12:45:52 -07002607 * This way, the debt is continuously paid off each period with the
Tejun Heo0b80f982020-05-04 19:27:54 -04002608 * actual budget available to the cgroup. If we just wound vtime, we
2609 * would incorrectly use the current hw_inuse for the entire amount
2610 * which, for example, can lead to the cgroup staying blocked for a
2611 * long time even with substantially raised hw_inuse.
2612 *
2613 * An iocg with vdebt should stay online so that the timer can keep
2614 * deducting its vdebt and [de]activate use_delay mechanism
2615 * accordingly. We don't want to race against the timer trying to
2616 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2617 * penalizing the cgroup and its descendants.
Tejun Heo36a52482019-09-04 12:45:52 -07002618 */
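	/*
	 * To make the above concrete with invented numbers: at 1% hw_inuse,
	 * winding vtime for an abs_cost of 1ms would charge ~100ms of vtime
	 * up front. Recorded as abs_vdebt instead, the same 1ms is paid off
	 * at whatever hw_inuse the iocg holds in later periods - e.g. only
	 * 1ms worth once it is back at 100%.
	 */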
Tejun Heoda437b92020-09-01 14:52:42 -04002619 if (use_debt) {
Tejun Heoc421a3e2020-09-01 14:52:51 -04002620 iocg_incur_debt(iocg, abs_cost, &now);
Tejun Heo54c52e12020-04-13 12:27:55 -04002621 if (iocg_kick_delay(iocg, &now))
Tejun Heod7bd15a2019-12-16 13:34:00 -08002622 blkcg_schedule_throttle(rqos->q,
2623 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
Tejun Heoda437b92020-09-01 14:52:42 -04002624 iocg_unlock(iocg, ioc_locked, &flags);
Tejun Heo7caa4712019-08-28 15:05:58 -07002625 return;
2626 }
2627
Tejun Heob0853ab2020-09-01 14:52:50 -04002628 /* guarantee that iocgs w/ waiters have maximum inuse */
Tejun Heoc421a3e2020-09-01 14:52:51 -04002629 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
Tejun Heob0853ab2020-09-01 14:52:50 -04002630 if (!ioc_locked) {
2631 iocg_unlock(iocg, false, &flags);
2632 ioc_locked = true;
2633 goto retry_lock;
2634 }
2635 propagate_weights(iocg, iocg->active, iocg->active, true,
2636 &now);
2637 }
2638
Tejun Heo7caa4712019-08-28 15:05:58 -07002639 /*
2640 * Append self to the waitq and schedule the wakeup timer if we're
2641 * the first waiter. The timer duration is calculated based on the
2642 * current vrate. vtime and hweight changes can make it too short
2643 * or too long. Each wait entry records the absolute cost it's
2644 * waiting for to allow re-evaluation using a custom wait entry.
2645 *
2646 * If too short, the timer simply reschedules itself. If too long,
2647 * the period timer will notice and trigger wakeups.
2648 *
2649 * All waiters are on iocg->waitq and the wait states are
2650 * synchronized using waitq.lock.
2651 */
Tejun Heo7caa4712019-08-28 15:05:58 -07002652 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2653 wait.wait.private = current;
2654 wait.bio = bio;
2655 wait.abs_cost = abs_cost;
2656 wait.committed = false; /* will be set true by waker */
2657
2658 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
Tejun Heoda437b92020-09-01 14:52:42 -04002659 iocg_kick_waitq(iocg, ioc_locked, &now);
Tejun Heo7caa4712019-08-28 15:05:58 -07002660
Tejun Heoda437b92020-09-01 14:52:42 -04002661 iocg_unlock(iocg, ioc_locked, &flags);
Tejun Heo7caa4712019-08-28 15:05:58 -07002662
2663 while (true) {
2664 set_current_state(TASK_UNINTERRUPTIBLE);
2665 if (wait.committed)
2666 break;
2667 io_schedule();
2668 }
2669
2670 /* waker already committed us, proceed */
2671 finish_wait(&iocg->waitq, &wait.wait);
2672}
2673
2674static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2675 struct bio *bio)
2676{
2677 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
Tejun Heod16baa32021-01-05 12:37:23 -05002678 struct ioc *ioc = rqos_to_ioc(rqos);
Tejun Heo7caa4712019-08-28 15:05:58 -07002679 sector_t bio_end = bio_end_sector(bio);
Tejun Heoe1518f62019-09-04 12:45:53 -07002680 struct ioc_now now;
Tejun Heob0853ab2020-09-01 14:52:50 -04002681 u64 vtime, abs_cost, cost;
Tejun Heo0b80f982020-05-04 19:27:54 -04002682 unsigned long flags;
Tejun Heo7caa4712019-08-28 15:05:58 -07002683
Tejun Heod16baa32021-01-05 12:37:23 -05002684 /* bypass if disabled, still initializing, or for root cgroup */
2685 if (!ioc->enabled || !iocg || !iocg->level)
Tejun Heo7caa4712019-08-28 15:05:58 -07002686 return;
2687
2688 abs_cost = calc_vtime_cost(bio, iocg, true);
2689 if (!abs_cost)
2690 return;
2691
Tejun Heoe1518f62019-09-04 12:45:53 -07002692 ioc_now(ioc, &now);
Tejun Heob0853ab2020-09-01 14:52:50 -04002693
2694 vtime = atomic64_read(&iocg->vtime);
2695 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
Tejun Heoe1518f62019-09-04 12:45:53 -07002696
Tejun Heo7caa4712019-08-28 15:05:58 -07002697 /* update cursor if backmerging into the request at the cursor */
2698 if (blk_rq_pos(rq) < bio_end &&
2699 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2700 iocg->cursor = bio_end;
2701
Tejun Heoe1518f62019-09-04 12:45:53 -07002702 /*
Tejun Heo0b80f982020-05-04 19:27:54 -04002703 * Charge if there's enough vtime budget and the existing request has
2704 * cost assigned.
Tejun Heoe1518f62019-09-04 12:45:53 -07002705 */
2706 if (rq->bio && rq->bio->bi_iocost_cost &&
Tejun Heo0b80f982020-05-04 19:27:54 -04002707 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
Tejun Heo97eb1972020-09-01 14:52:43 -04002708 iocg_commit_bio(iocg, bio, abs_cost, cost);
Tejun Heo0b80f982020-05-04 19:27:54 -04002709 return;
2710 }
2711
2712 /*
2713 * Otherwise, account it as debt if @iocg is online, which it should
2714 * be for the vast majority of cases. See debt handling in
2715 * ioc_rqos_throttle() for details.
2716 */
Tejun Heoc421a3e2020-09-01 14:52:51 -04002717 spin_lock_irqsave(&ioc->lock, flags);
2718 spin_lock(&iocg->waitq.lock);
2719
Tejun Heo0b80f982020-05-04 19:27:54 -04002720 if (likely(!list_empty(&iocg->active_list))) {
Tejun Heoc421a3e2020-09-01 14:52:51 -04002721 iocg_incur_debt(iocg, abs_cost, &now);
2722 if (iocg_kick_delay(iocg, &now))
2723 blkcg_schedule_throttle(rqos->q,
2724 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
Tejun Heo0b80f982020-05-04 19:27:54 -04002725 } else {
Tejun Heo97eb1972020-09-01 14:52:43 -04002726 iocg_commit_bio(iocg, bio, abs_cost, cost);
Tejun Heo0b80f982020-05-04 19:27:54 -04002727 }
Tejun Heoc421a3e2020-09-01 14:52:51 -04002728
2729 spin_unlock(&iocg->waitq.lock);
2730 spin_unlock_irqrestore(&ioc->lock, flags);
Tejun Heo7caa4712019-08-28 15:05:58 -07002731}
2732
2733static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2734{
2735 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2736
2737 if (iocg && bio->bi_iocost_cost)
2738 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2739}
2740
2741static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2742{
2743 struct ioc *ioc = rqos_to_ioc(rqos);
Tejun Heo5e124f72020-09-01 14:52:33 -04002744 struct ioc_pcpu_stat *ccs;
Tejun Heocd006502020-04-13 12:27:56 -04002745 u64 on_q_ns, rq_wait_ns, size_nsec;
Tejun Heo7caa4712019-08-28 15:05:58 -07002746 int pidx, rw;
2747
2748 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2749 return;
2750
2751 switch (req_op(rq) & REQ_OP_MASK) {
2752 case REQ_OP_READ:
2753 pidx = QOS_RLAT;
2754 rw = READ;
2755 break;
2756 case REQ_OP_WRITE:
2757 pidx = QOS_WLAT;
2758 rw = WRITE;
2759 break;
2760 default:
2761 return;
2762 }
2763
2764 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2765 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
Tejun Heocd006502020-04-13 12:27:56 -04002766 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
Tejun Heo7caa4712019-08-28 15:05:58 -07002767
Tejun Heo5e124f72020-09-01 14:52:33 -04002768 ccs = get_cpu_ptr(ioc->pcpu_stat);
2769
Tejun Heocd006502020-04-13 12:27:56 -04002770 if (on_q_ns <= size_nsec ||
2771 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
Tejun Heo5e124f72020-09-01 14:52:33 -04002772 local_inc(&ccs->missed[rw].nr_met);
Tejun Heo7caa4712019-08-28 15:05:58 -07002773 else
Tejun Heo5e124f72020-09-01 14:52:33 -04002774 local_inc(&ccs->missed[rw].nr_missed);
Tejun Heo7caa4712019-08-28 15:05:58 -07002775
Tejun Heo5e124f72020-09-01 14:52:33 -04002776 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2777
2778 put_cpu_ptr(ccs);
Tejun Heo7caa4712019-08-28 15:05:58 -07002779}
2780
2781static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2782{
2783 struct ioc *ioc = rqos_to_ioc(rqos);
2784
2785 spin_lock_irq(&ioc->lock);
2786 ioc_refresh_params(ioc, false);
2787 spin_unlock_irq(&ioc->lock);
2788}
2789
2790static void ioc_rqos_exit(struct rq_qos *rqos)
2791{
2792 struct ioc *ioc = rqos_to_ioc(rqos);
2793
2794 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2795
2796 spin_lock_irq(&ioc->lock);
2797 ioc->running = IOC_STOP;
2798 spin_unlock_irq(&ioc->lock);
2799
2800 del_timer_sync(&ioc->timer);
2801 free_percpu(ioc->pcpu_stat);
2802 kfree(ioc);
2803}
2804
2805static struct rq_qos_ops ioc_rqos_ops = {
2806 .throttle = ioc_rqos_throttle,
2807 .merge = ioc_rqos_merge,
2808 .done_bio = ioc_rqos_done_bio,
2809 .done = ioc_rqos_done,
2810 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2811 .exit = ioc_rqos_exit,
2812};
2813
static int blk_iocost_init(struct request_queue *q)
{
	struct ioc *ioc;
	struct rq_qos *rqos;
	int i, cpu, ret;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
	if (!ioc->pcpu_stat) {
		kfree(ioc);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);

		for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
			local_set(&ccs->missed[i].nr_met, 0);
			local_set(&ccs->missed[i].nr_missed, 0);
		}
		local64_set(&ccs->rq_wait_ns, 0);
	}

	rqos = &ioc->rqos;
	rqos->id = RQ_QOS_COST;
	rqos->ops = &ioc_rqos_ops;
	rqos->q = q;

	spin_lock_init(&ioc->lock);
	timer_setup(&ioc->timer, ioc_timer_fn, 0);
	INIT_LIST_HEAD(&ioc->active_iocgs);

	ioc->running = IOC_IDLE;
	ioc->vtime_base_rate = VTIME_PER_USEC;
	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
	ioc->period_at = ktime_to_us(ktime_get());
	atomic64_set(&ioc->cur_period, 0);
	atomic_set(&ioc->hweight_gen, 0);

	spin_lock_irq(&ioc->lock);
	ioc->autop_idx = AUTOP_INVALID;
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	/*
	 * rqos must be added before activation to allow iocg_pd_init() to
	 * lookup the ioc from q. This means that the rqos methods may get
	 * called before policy activation completion, can't assume that the
	 * target bio has an iocg associated and need to test for NULL iocg.
	 */
	rq_qos_add(q, rqos);
	ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
	if (ret) {
		rq_qos_del(q, rqos);
		free_percpu(ioc->pcpu_stat);
		kfree(ioc);
		return ret;
	}
	return 0;
}

static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
{
	struct ioc_cgrp *iocc;

	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
	if (!iocc)
		return NULL;

	iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
	return &iocc->cpd;
}

static void ioc_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(container_of(cpd, struct ioc_cgrp, cpd));
}

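/*
 * ancestors[] is a flexible array with one slot per hierarchy level
 * including self, hence the struct_size() allocation below; ioc_pd_init()
 * fills it in.
 */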
static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	int levels = blkcg->css.cgroup->level + 1;
	struct ioc_gq *iocg;

	iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
	if (!iocg)
		return NULL;

	iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
	if (!iocg->pcpu_stat) {
		kfree(iocg);
		return NULL;
	}

	return &iocg->pd;
}

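/*
 * Start the new iocg at the device's current vtime with vtime and
 * done_vtime in sync, record the ancestor table, and apply the configured
 * weight via weight_updated().
 */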
static void ioc_pd_init(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
	struct ioc *ioc = q_to_ioc(blkg->q);
	struct ioc_now now;
	struct blkcg_gq *tblkg;
	unsigned long flags;

	ioc_now(ioc, &now);

	iocg->ioc = ioc;
	atomic64_set(&iocg->vtime, now.vnow);
	atomic64_set(&iocg->done_vtime, now.vnow);
	atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
	INIT_LIST_HEAD(&iocg->active_list);
	INIT_LIST_HEAD(&iocg->walk_list);
	INIT_LIST_HEAD(&iocg->surplus_list);
	iocg->hweight_active = WEIGHT_ONE;
	iocg->hweight_inuse = WEIGHT_ONE;

	init_waitqueue_head(&iocg->waitq);
	hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	iocg->waitq_timer.function = iocg_waitq_timer_fn;

	iocg->level = blkg->blkcg->css.cgroup->level;

	for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
		struct ioc_gq *tiocg = blkg_to_iocg(tblkg);

		iocg->ancestors[tiocg->level] = tiocg;
	}

	spin_lock_irqsave(&ioc->lock, flags);
	weight_updated(iocg, &now);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

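/*
 * The iocg may still be on the active list if the cgroup is removed with
 * IOs in flight; zero its weights and unlink it under ioc->lock before
 * freeing.
 */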
static void ioc_pd_free(struct blkg_policy_data *pd)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	unsigned long flags;

	if (ioc) {
		spin_lock_irqsave(&ioc->lock, flags);

		if (!list_empty(&iocg->active_list)) {
			struct ioc_now now;

			ioc_now(ioc, &now);
			propagate_weights(iocg, 0, 0, false, &now);
			list_del_init(&iocg->active_list);
		}

		WARN_ON_ONCE(!list_empty(&iocg->walk_list));
		WARN_ON_ONCE(!list_empty(&iocg->surplus_list));

		spin_unlock_irqrestore(&ioc->lock, flags);

		hrtimer_cancel(&iocg->waitq_timer);
	}
	free_percpu(iocg->pcpu_stat);
	kfree(iocg);
}

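/*
 * Emit the cost.* keys appended to this cgroup's io.stat line, e.g. with
 * illustrative values:
 *
 *	cost.vrate=137.50 cost.usage=12345
 *
 * cost.vrate is only reported at the root (level 0) and the cost.wait,
 * cost.indebt and cost.indelay keys are gated behind blkcg_debug_stats.
 */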
static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
{
	struct ioc_gq *iocg = pd_to_iocg(pd);
	struct ioc *ioc = iocg->ioc;
	size_t pos = 0;

	if (!ioc->enabled)
		return 0;

	if (iocg->level == 0) {
		unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
			ioc->vtime_base_rate * 10000,
			VTIME_PER_USEC);
		pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
				 vp10k / 100, vp10k % 100);
	}

	pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
			 iocg->last_stat.usage_us);

	if (blkcg_debug_stats)
		pos += scnprintf(buf + pos, size - pos,
				 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
				 iocg->last_stat.wait_us,
				 iocg->last_stat.indebt_us,
				 iocg->last_stat.indelay_us);

	return pos;
}

static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			     int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc_gq *iocg = pd_to_iocg(pd);

	if (dname && iocg->cfg_weight)
		seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
	return 0;
}

static int ioc_weight_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);

	seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
	blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

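/*
 * io.weight accepts either a cgroup-wide default ("default $WEIGHT" or
 * just "$WEIGHT") or a per-device override ("$MAJ:$MIN $WEIGHT", with
 * "$MAJ:$MIN default" clearing the override). Weights must fall in
 * [CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX].
 */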
static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
	struct blkg_conf_ctx ctx;
	struct ioc_now now;
	struct ioc_gq *iocg;
	u32 v;
	int ret;

	if (!strchr(buf, ':')) {
		struct blkcg_gq *blkg;

		if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
			return -EINVAL;

		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			return -EINVAL;

		spin_lock(&blkcg->lock);
		iocc->dfl_weight = v * WEIGHT_ONE;
		hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
			struct ioc_gq *iocg = blkg_to_iocg(blkg);

			if (iocg) {
				spin_lock_irq(&iocg->ioc->lock);
				ioc_now(iocg->ioc, &now);
				weight_updated(iocg, &now);
				spin_unlock_irq(&iocg->ioc->lock);
			}
		}
		spin_unlock(&blkcg->lock);

		return nbytes;
	}

	ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
	if (ret)
		return ret;

	iocg = blkg_to_iocg(ctx.blkg);

	if (!strncmp(ctx.body, "default", 7)) {
		v = 0;
	} else {
		if (!sscanf(ctx.body, "%u", &v))
			goto einval;
		if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
			goto einval;
	}

	spin_lock(&iocg->ioc->lock);
	iocg->cfg_weight = v * WEIGHT_ONE;
	ioc_now(iocg->ioc, &now);
	weight_updated(iocg, &now);
	spin_unlock(&iocg->ioc->lock);

	blkg_conf_finish(&ctx);
	return nbytes;

einval:
	blkg_conf_finish(&ctx);
	return -EINVAL;
}

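/*
 * Format one device's QoS setting for io.cost.qos, e.g. with illustrative
 * values:
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 wlat=5000
 *	min=50.00 max=150.00
 *
 * (printed as a single line; wrapped here for readability)
 */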
static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
			  int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;

	if (!dname)
		return 0;

	seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
		   dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
		   ioc->params.qos[QOS_RPPM] / 10000,
		   ioc->params.qos[QOS_RPPM] % 10000 / 100,
		   ioc->params.qos[QOS_RLAT],
		   ioc->params.qos[QOS_WPPM] / 10000,
		   ioc->params.qos[QOS_WPPM] % 10000 / 100,
		   ioc->params.qos[QOS_WLAT],
		   ioc->params.qos[QOS_MIN] / 10000,
		   ioc->params.qos[QOS_MIN] % 10000 / 100,
		   ioc->params.qos[QOS_MAX] / 10000,
		   ioc->params.qos[QOS_MAX] % 10000 / 100);
	return 0;
}

static int ioc_qos_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t qos_ctrl_tokens = {
	{ QOS_ENABLE,		"enable=%u"	},
	{ QOS_CTRL,		"ctrl=%s"	},
	{ NR_QOS_CTRL_PARAMS,	NULL		},
};

static const match_table_t qos_tokens = {
	{ QOS_RPPM,		"rpct=%s"	},
	{ QOS_RLAT,		"rlat=%u"	},
	{ QOS_WPPM,		"wpct=%s"	},
	{ QOS_WLAT,		"wlat=%u"	},
	{ QOS_MIN,		"min=%s"	},
	{ QOS_MAX,		"max=%s"	},
	{ NR_QOS_PARAMS,	NULL		},
};

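/*
 * Parse and apply an io.cost.qos update. Input is the device number
 * followed by any mix of the tokens above, e.g. with illustrative values:
 *
 *	"8:16 enable=1 rpct=95.00 rlat=5000 min=50.00 max=150.00"
 *
 * Setting any QoS parameter switches ctrl to "user"; writing "ctrl=auto"
 * reverts to the automatic parameters.
 */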
static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
			     size_t nbytes, loff_t off)
{
	struct block_device *bdev;
	struct ioc *ioc;
	u32 qos[NR_QOS_PARAMS];
	bool enable, user;
	char *p;
	int ret;

	bdev = blkcg_conf_open_bdev(&input);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	ioc = q_to_ioc(bdev->bd_disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(bdev->bd_disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(bdev->bd_disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(qos, ioc->params.qos, sizeof(qos));
	enable = ioc->enabled;
	user = ioc->user_qos_params;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		s64 v;

		if (!*p)
			continue;

		switch (match_token(p, qos_ctrl_tokens, args)) {
		case QOS_ENABLE:
			match_u64(&args[0], &v);
			enable = v;
			continue;
		case QOS_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		}

		tok = match_token(p, qos_tokens, args);
		switch (tok) {
		case QOS_RPPM:
		case QOS_WPPM:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0 || v > 10000)
				goto einval;
			qos[tok] = v * 100;
			break;
		case QOS_RLAT:
		case QOS_WLAT:
			if (match_u64(&args[0], &v))
				goto einval;
			qos[tok] = v;
			break;
		case QOS_MIN:
		case QOS_MAX:
			if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
			    sizeof(buf))
				goto einval;
			if (cgroup_parse_float(buf, 2, &v))
				goto einval;
			if (v < 0)
				goto einval;
			qos[tok] = clamp_t(s64, v * 100,
					   VRATE_MIN_PPM, VRATE_MAX_PPM);
			break;
		default:
			goto einval;
		}
		user = true;
	}

	if (qos[QOS_MIN] > qos[QOS_MAX])
		goto einval;

	spin_lock_irq(&ioc->lock);

	if (enable) {
		blk_stat_enable_accounting(ioc->rqos.q);
		blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = true;
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
		ioc->enabled = false;
	}

	if (user) {
		memcpy(ioc->params.qos, qos, sizeof(qos));
		ioc->user_qos_params = true;
	} else {
		ioc->user_qos_params = false;
	}

	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	blkdev_put_no_open(bdev);
	return nbytes;
einval:
	ret = -EINVAL;
err:
	blkdev_put_no_open(bdev);
	return ret;
}

static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}

static const match_table_t cost_ctrl_tokens = {
	{ COST_CTRL,		"ctrl=%s"	},
	{ COST_MODEL,		"model=%s"	},
	{ NR_COST_CTRL_PARAMS,	NULL		},
};

static const match_table_t i_lcoef_tokens = {
	{ I_LCOEF_RBPS,		"rbps=%u"	},
	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u"	},
	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u"	},
	{ I_LCOEF_WBPS,		"wbps=%u"	},
	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u"	},
	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u"	},
	{ NR_I_LCOEFS,		NULL		},
};

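/*
 * Parse and apply an io.cost.model update, e.g. with illustrative
 * coefficients:
 *
 *	"8:16 rbps=2000000000 rseqiops=120000 rrandiops=80000"
 *
 * Setting any coefficient switches ctrl to "user" and "ctrl=auto" reverts
 * to the builtin parameters; "linear" is the only accepted model.
 */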
static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct block_device *bdev;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *p;
	int ret;

	bdev = blkcg_conf_open_bdev(&input);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	ioc = q_to_ioc(bdev->bd_disk->queue);
	if (!ioc) {
		ret = blk_iocost_init(bdev->bd_disk->queue);
		if (ret)
			goto err;
		ioc = q_to_ioc(bdev->bd_disk->queue);
	}

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;
	spin_unlock_irq(&ioc->lock);

	while ((p = strsep(&input, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	spin_lock_irq(&ioc->lock);
	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	blkdev_put_no_open(bdev);
	return nbytes;

einval:
	ret = -EINVAL;
err:
	blkdev_put_no_open(bdev);
	return ret;
}

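/* io.weight is per-cgroup; cost.qos and cost.model only exist at the root */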
static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}
};

static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes	= ioc_files,
	.cpd_alloc_fn	= ioc_cpd_alloc,
	.cpd_free_fn	= ioc_cpd_free,
	.pd_alloc_fn	= ioc_pd_alloc,
	.pd_init_fn	= ioc_pd_init,
	.pd_free_fn	= ioc_pd_free,
	.pd_stat_fn	= ioc_pd_stat,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);