/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ's benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. This feature enables
 * BFQ to provide applications in these classes with a very low
 * latency. Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and for getting the job done
 * quickly for applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], which also contains a reference to the
 * initial, more theoretical paper on BFQ. The interested reader can
 * find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties. With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * one that guarantees a low latency to soft real-time applications,
 * and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"

#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);			\
}
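
/*
 * Illustrative note: each BFQ_BFQQ_FNS(x) invocation below expands to the
 * three helpers bfq_mark_bfqq_x(), bfq_clear_bfqq_x() and bfq_bfqq_x(),
 * which respectively set, clear and test the BFQQF_x bit in bfqq->flags.
 * For instance, BFQ_BFQQ_FNS(busy) yields bfq_mark_bfqq_busy(),
 * bfq_clear_bfqq_busy() and bfq_bfqq_busy().
 */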

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget value, in sectors. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * Async to sync throughput distribution is controlled as follows:
 * when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below.
 */
static const int bfq_async_charge_factor = 10;

/* Default timeout value, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 32/8)
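/*
 * Illustrative note (interpretation assumed from the macro above):
 * seek_history is treated as a 32-bit sliding window with one bit per
 * recently tracked request, so BFQQ_SEEKY() deems a queue seeky when more
 * than 32/8 = 4 of the last 32 tracked requests were seeky.
 */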

/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT		16

/*
 * By default, BFQ computes the duration of the weight raising for
 * interactive applications automatically, using the following formula:
 * duration = (R / r) * T, where r is the peak rate of the device, and
 * R and T are two reference parameters.
 * In particular, R is the peak rate of the reference device (see below),
 * and T is a reference time: given the systems that are likely to be
 * installed on the reference device according to its speed class, T is
 * about the maximum time needed, under BFQ and while reading two files in
 * parallel, to load typical large applications on these systems.
 * In practice, the slower/faster the device at hand is, the more/less it
 * takes to load applications with respect to the reference device.
 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
 * applications.
 *
 * BFQ uses four different reference pairs (R, T), depending on:
 * . whether the device is rotational or non-rotational;
 * . whether the device is slow, such as old or portable HDDs, as well as
 *   SD cards, or fast, such as newer HDDs and SSDs.
 *
 * The device's speed class is dynamically (re)detected in
 * bfq_update_peak_rate() every time the estimated peak rate is updated.
 *
 * In the following definitions, R_slow[0]/R_fast[0] and
 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
 * rotational device, whereas R_slow[1]/R_fast[1] and
 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
 * non-rotational device. Finally, device_speed_thresh are the
 * thresholds used to switch between speed classes. The reference
 * rates are not the actual peak rates of the devices used as a
 * reference, but slightly lower values. The reason for using these
 * slightly lower values is that the peak-rate estimator tends to
 * yield slightly lower values than the actual peak rate (it can yield
 * the actual peak rate only if there is only one process doing I/O,
 * and the process does sequential I/O).
 *
 * Both the reference peak rates and the thresholds are measured in
 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
 */
static int R_slow[2] = {1000, 10700};
static int R_fast[2] = {14000, 33000};
/*
 * To improve readability, a conversion function is used to initialize the
 * following arrays, which entails that they can be initialized only in a
 * function.
 */
static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];
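/*
 * Worked example of the formula above: a device whose estimated peak rate
 * r equals the reference rate R for its class gets exactly the reference
 * duration T of weight raising; a device running at half that rate gets
 * 2 * T, a device running at twice that rate gets T / 2. The actual
 * computation, including clamping, is in bfq_wr_duration() below.
 */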

#define RQ_BIC(rq)		((struct bfq_io_cq *) (rq)->elv.priv[0])
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
					struct io_context *ioc,
					struct request_queue *q)
{
	if (ioc) {
		unsigned long flags;
		struct bfq_io_cq *icq;

		spin_lock_irqsave(q->queue_lock, flags);
		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
		spin_unlock_irqrestore(q->queue_lock, flags);

		return icq;
	}

	return NULL;
}

/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
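
/*
 * Worked example for bfq_choose_req() (illustrative values): with
 * last = 1000, rq1 at sector 1100, rq2 at sector 950 and the default
 * bfq_back_penalty = 2, we get d1 = 100 (forward) and
 * d2 = (1000 - 950) * 2 = 100 (backward, penalized). The distances tie,
 * so the forward-lying rq1 wins through the s1 >= s2 tie-break.
 */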

static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		       sector_t sector, struct rb_node **ret_parent,
		       struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}

void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}

/*
 * Tell whether there are active queues or groups with differentiated weights.
 */
static bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
	/*
	 * For weights to differ, at least one of the trees must contain
	 * at least two nodes.
	 */
	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       ) ||
	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
		(bfqd->group_weights_tree.rb_node->rb_left ||
		 bfqd->group_weights_tree.rb_node->rb_right)
#endif
	       );
}

/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_bfqq_may_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly the
 * above symmetry conditions would be quite complex and time-consuming.
 * Therefore this function evaluates, instead, the following stronger
 * sub-conditions, for which it is much easier to maintain the needed
 * state:
 * 1) all active queues have the same weight,
 * 2) all active groups have the same weight,
 * 3) all active groups have at most one active child each.
 * In particular, the last two conditions are always true if hierarchical
 * support and the cgroups interface are not enabled, thus no state needs
 * to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
	return !bfq_differentiated_weights(bfqd);
}

/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input entity, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
			  struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/*
	 * Do not insert if the entity is already associated with a
	 * counter, which happens if:
	 * 1) the entity is associated with a queue,
	 * 2) a request arrival has caused the queue to become both
	 *    non-weight-raised, and hence change its weight, and
	 *    backlogged; in this respect, each of the two events
	 *    causes an invocation of this function,
	 * 3) this is the invocation of this function caused by the
	 *    second event. This second invocation is actually useless,
	 *    and we handle this fact by exiting immediately. More
	 *    efficient or clearer solutions might possibly be adopted.
	 */
	if (entity->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			entity->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
					 GFP_ATOMIC);

	/*
	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of entity to not be
	 * considered in bfq_differentiated_weights, which, in its
	 * turn, causes the scenario to be deemed wrongly symmetric in
	 * case entity's weight would have been the only weight making
	 * the scenario asymmetric. On the bright side, no unbalance
	 * will however occur when entity becomes inactive again (the
	 * invocation of this function is triggered by an activation
	 * of entity). In fact, bfq_weights_tree_remove does nothing
	 * if !entity->weight_counter.
	 */
	if (unlikely(!entity->weight_counter))
		return;

	entity->weight_counter->weight = entity->weight;
	rb_link_node(&entity->weight_counter->weights_node, parent, new);
	rb_insert_color(&entity->weight_counter->weights_node, root);

inc_counter:
	entity->weight_counter->num_active++;
}

/*
 * Decrement the weight counter associated with the entity, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
			     struct rb_root *root)
{
	if (!entity->weight_counter)
		return;

	entity->weight_counter->num_active--;
	if (entity->weight_counter->num_active > 0)
		goto reset_entity_pointer;

	rb_erase(&entity->weight_counter->weights_node, root);
	kfree(entity->weight_counter);

reset_entity_pointer:
	entity->weight_counter = NULL;
}

/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
				      struct request *last)
{
	struct request *rq;

	if (bfq_bfqq_fifo_expire(bfqq))
		return NULL;

	bfq_mark_bfqq_fifo_expire(bfqq);

	rq = rq_entry_fifo(bfqq->fifo.next);

	if (rq == last || ktime_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
	return rq;
}

static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
					struct bfq_queue *bfqq,
					struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next, *prev = NULL;

	/* Follow expired path, else get first next available. */
	next = bfq_check_fifo(bfqq, last);
	if (next)
		return next;

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&bfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}

/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
					struct bfq_queue *bfqq)
{
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

	/*
	 * If there are no weight-raised queues, then amplify service
	 * by just the async charge factor; otherwise amplify service
	 * by twice the async charge factor, to further reduce latency
	 * for weight-raised queues.
	 */
	if (bfqq->bfqd->wr_busy_queues == 0)
		return blk_rq_sectors(rq) * bfq_async_charge_factor;

	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
}
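
/*
 * Worked example for bfq_serv_to_charge() (illustrative numbers): an async
 * request of 8 sectors from a non-weight-raised queue is charged
 * 8 * bfq_async_charge_factor = 80 sectors of budget if no weight-raised
 * queue is busy, and 8 * 2 * 10 = 160 sectors otherwise, whereas a sync
 * request of 8 sectors is always charged exactly 8.
 */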

/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown). We do this because if the queue does not have
 * enough budget for its first request, it has to go through two dispatch
 * rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct request *next_rq = bfqq->next_rq;
	unsigned long new_budget;

	if (!next_rq)
		return;

	if (bfqq == bfqd->in_service_queue)
		/*
		 * In order not to break guarantees, budgets cannot be
		 * changed after an entity has been selected.
		 */
		return;

	new_budget = max_t(unsigned long, bfqq->max_budget,
			   bfq_serv_to_charge(next_rq, bfqq));
	if (entity->budget != new_budget) {
		entity->budget = new_budget;
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
					 new_budget);
		bfq_requeue_bfqq(bfqd, bfqq, false);
	}
}

static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
		      struct bfq_io_cq *bic, bool bfq_already_existing)
{
	unsigned int old_wr_coeff = bfqq->wr_coeff;
	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

	if (bic->saved_has_short_ttime)
		bfq_mark_bfqq_has_short_ttime(bfqq);
	else
		bfq_clear_bfqq_has_short_ttime(bfqq);

	if (bic->saved_IO_bound)
		bfq_mark_bfqq_IO_bound(bfqq);
	else
		bfq_clear_bfqq_IO_bound(bfqq);

	bfqq->ttime = bic->saved_ttime;
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_before_jiffies(bfqq->last_wr_start_finish +
				   bfqq->wr_cur_max_time))) {
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			     "resume state: switching off wr");

		bfqq->wr_coeff = 1;
	}

	/* make sure weight will be updated, no matter how we got here */
	bfqq->entity.prio_changed = 1;

	if (likely(!busy))
		return;

	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
		bfqd->wr_busy_queues--;
}

/*
 * References to bfqq not held by allocated requests (bfqq->allocated) or
 * by bfqq being on a service tree (bfqq->entity.on_st), i.e., roughly the
 * references held by the processes actually sharing the queue.
 */
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}

/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_queue *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
		hlist_del_init(&item->burst_list_node);
	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
	bfqd->burst_size = 1;
	bfqd->burst_parent_entity = bfqq->entity.parent;
}

/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/* Increment burst size to take into account also bfqq */
	bfqd->burst_size++;

	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
		struct bfq_queue *pos, *bfqq_item;
		struct hlist_node *n;

		/*
		 * Enough queues have been activated shortly after each
		 * other to consider this burst as large.
		 */
		bfqd->large_burst = true;

		/*
		 * We can now mark all queues in the burst list as
		 * belonging to a large burst.
		 */
		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
				     burst_list_node)
			bfq_mark_bfqq_in_large_burst(bfqq_item);
		bfq_mark_bfqq_in_large_burst(bfqq);

		/*
		 * From now on, and until the current burst finishes, any
		 * new queue being activated shortly after the last queue
		 * was inserted in the burst can be immediately marked as
		 * belonging to a large burst. So the burst list is not
		 * needed any more. Remove it.
		 */
		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
					  burst_list_node)
			hlist_del_init(&pos->burst_list_node);
	} else /*
		* Burst not yet large: add bfqq to the burst list. Do
		* not increment the ref counter for bfqq, because bfqq
		* is removed from the burst list before freeing bfqq
		* in put_queue.
		*/
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}

/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues typically have a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we call these bursts just 'large'. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst. In
 * addition, for each of these queues BFQ performs or does not perform
 * idling depending on which choice boosts the throughput more. The
 * exact choice depends on the device and request pattern at
 * hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (temporarily keeping track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created only a
 *   while later, i.e., not shortly after, the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed as finished and:
 *
 *        . the large-burst mode is reset if set
 *
 *        . the burst list is emptied
 *
 *        . Q is inserted in the burst list, as Q may be the first queue
 *          in a possible new burst (then the burst list contains just Q
 *          after this step).
 */
static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/*
	 * If bfqq is already in the burst list or is part of a large
	 * burst, or finally has just been split, then there is
	 * nothing else to do.
	 */
	if (!hlist_unhashed(&bfqq->burst_list_node) ||
	    bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_after_eq_jiffies(bfqq->split_time +
				     msecs_to_jiffies(10)))
		return;

	/*
	 * If bfqq's creation happens late enough, or bfqq belongs to
	 * a different group than the burst group, then the current
	 * burst is finished, and related data structures must be
	 * reset.
	 *
	 * In this respect, consider the special case where bfqq is
	 * the very first queue created after BFQ is selected for this
	 * device. In this case, last_ins_in_burst and
	 * burst_parent_entity are not yet significant when we get
	 * here. But it is easy to verify that, whether or not the
	 * following condition is true, bfqq will end up being
	 * inserted into the burst list. In particular the list will
	 * happen to contain only bfqq. And this is exactly what has
	 * to happen, as bfqq may be the first queue of the first
	 * burst.
	 */
	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
	    bfqd->bfq_burst_interval) ||
	    bfqq->entity.parent != bfqd->burst_parent_entity) {
		bfqd->large_burst = false;
		bfq_reset_burst_list(bfqd, bfqq);
		goto end;
	}

	/*
	 * If we get here, then bfqq is being activated shortly after the
	 * last queue. So, if the current burst is also large, we can mark
	 * bfqq as belonging to this large burst immediately.
	 */
	if (bfqd->large_burst) {
		bfq_mark_bfqq_in_large_burst(bfqq);
		goto end;
	}

	/*
	 * If we get here, then a large-burst state has not yet been
	 * reached, but bfqq is being activated shortly after the last
	 * queue. Then we add bfqq to the burst.
	 */
	bfq_add_to_burst(bfqd, bfqq);
end:
	/*
	 * At this point, bfqq either has been added to the current
	 * burst or has caused the current burst to terminate and a
	 * possible new burst to start. In particular, in the second
	 * case, bfqq has become the first queue in the possible new
	 * burst. In both cases last_ins_in_burst needs to be moved
	 * forward.
	 */
	bfqd->last_ins_in_burst = jiffies;
}
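
/*
 * Illustrative scenario for the logic above: during boot, systemd may
 * create many queues within bfq_burst_interval of one another; once
 * burst_size reaches bfq_large_burst_thresh, all of them are marked
 * in_large_burst and are therefore not weight-raised, which favors
 * aggregate throughput over per-queue latency for that burst.
 */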
| 1001 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1002 | static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) |
| 1003 | { |
| 1004 | struct bfq_entity *entity = &bfqq->entity; |
| 1005 | |
| 1006 | return entity->budget - entity->service; |
| 1007 | } |
| 1008 | |
| 1009 | /* |
| 1010 | * If enough samples have been computed, return the current max budget |
| 1011 | * stored in bfqd, which is dynamically updated according to the |
| 1012 | * estimated disk peak rate; otherwise return the default max budget |
| 1013 | */ |
| 1014 | static int bfq_max_budget(struct bfq_data *bfqd) |
| 1015 | { |
| 1016 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) |
| 1017 | return bfq_default_max_budget; |
| 1018 | else |
| 1019 | return bfqd->bfq_max_budget; |
| 1020 | } |
| 1021 | |
| 1022 | /* |
| 1023 | * Return min budget, which is a fraction of the current or default |
| 1024 | * max budget (trying with 1/32) |
| 1025 | */ |
| 1026 | static int bfq_min_budget(struct bfq_data *bfqd) |
| 1027 | { |
| 1028 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) |
| 1029 | return bfq_default_max_budget / 32; |
| 1030 | else |
| 1031 | return bfqd->bfq_max_budget / 32; |
| 1032 | } |
| 1033 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1034 | /* |
| 1035 | * The next function, invoked after the input queue bfqq switches from |
| 1036 | * idle to busy, updates the budget of bfqq. The function also tells |
| 1037 | * whether the in-service queue should be expired, by returning |
| 1038 | * true. The purpose of expiring the in-service queue is to give bfqq |
| 1039 | * the chance to possibly preempt the in-service queue, and the reason |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1040 | * for preempting the in-service queue is to achieve one of the two |
| 1041 | * goals below. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1042 | * |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1043 | * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has |
| 1044 | * expired because it has remained idle. In particular, bfqq may have |
| 1045 | * expired for one of the following two reasons: |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1046 | * |
| 1047 | * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling |
| 1048 | * and did not make it to issue a new request before its last |
| 1049 | * request was served; |
| 1050 | * |
| 1051 | * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue |
| 1052 | * a new request before the expiration of the idling-time. |
| 1053 | * |
| 1054 | * Even if bfqq has expired for one of the above reasons, the process |
| 1055 | * associated with the queue may be however issuing requests greedily, |
| 1056 | * and thus be sensitive to the bandwidth it receives (bfqq may have |
| 1057 | * remained idle for other reasons: CPU high load, bfqq not enjoying |
| 1058 | * idling, I/O throttling somewhere in the path from the process to |
| 1059 | * the I/O scheduler, ...). But if, after every expiration for one of |
| 1060 | * the above two reasons, bfqq has to wait for the service of at least |
| 1061 | * one full budget of another queue before being served again, then |
| 1062 | * bfqq is likely to get a much lower bandwidth or resource time than |
| 1063 | * its reserved ones. To address this issue, two countermeasures need |
| 1064 | * to be taken. |
| 1065 | * |
| 1066 | * First, the budget and the timestamps of bfqq need to be updated in |
| 1067 | * a special way on bfqq reactivation: they need to be updated as if |
| 1068 | * bfqq did not remain idle and did not expire. In fact, if they are |
| 1069 | * computed as if bfqq expired and remained idle until reactivation, |
| 1070 | * then the process associated with bfqq is treated as if, instead of |
| 1071 | * being greedy, it stopped issuing requests when bfqq remained idle, |
| 1072 | * and restarts issuing requests only on this reactivation. In other |
| 1073 | * words, the scheduler does not help the process recover the "service |
| 1074 | * hole" between bfqq expiration and reactivation. As a consequence, |
| 1075 | * the process receives a lower bandwidth than its reserved one. In |
| 1076 | * contrast, to recover this hole, the budget must be updated as if |
| 1077 | * bfqq was not expired at all before this reactivation, i.e., it must |
| 1078 | * be set to the value of the remaining budget when bfqq was |
| 1079 | * expired. Along the same line, timestamps need to be assigned the |
| 1080 | * value they had the last time bfqq was selected for service, i.e., |
| 1081 | * before last expiration. Thus timestamps need to be back-shifted |
| 1082 | * with respect to their normal computation (see [1] for more details |
| 1083 | * on this tricky aspect). |
| 1084 | * |
| 1085 | * Secondly, to allow the process to recover the hole, the in-service |
| 1086 | * queue must be expired too, to give bfqq the chance to preempt it |
| 1087 | * immediately. In fact, if bfqq has to wait for a full budget of the |
| 1088 | * in-service queue to be completed, then it may become impossible to |
| 1089 | * let the process recover the hole, even if the back-shifted |
| 1090 | * timestamps of bfqq are lower than those of the in-service queue. If |
| 1091 | * this happens for most or all of the holes, then the process may not |
| 1092 | * receive its reserved bandwidth. In this respect, it is worth noting |
| 1093 | * that, being the service of outstanding requests unpreemptible, a |
| 1094 | * little fraction of the holes may however be unrecoverable, thereby |
| 1095 | * causing a little loss of bandwidth. |
| 1096 | * |
| 1097 | * The last important point is detecting whether bfqq does need this |
| 1098 | * bandwidth recovery. In this respect, the next function deems the |
| 1099 | * process associated with bfqq greedy, and thus allows it to recover |
| 1100 | * the hole, if: 1) the process is waiting for the arrival of a new |
| 1101 | * request (which implies that bfqq expired for one of the above two |
| 1102 | * reasons), and 2) such a request has arrived soon. The first |
| 1103 | * condition is controlled through the flag non_blocking_wait_rq, |
| 1104 | * while the second through the flag arrived_in_time. If both |
| 1105 | * conditions hold, then the function computes the budget in the |
| 1106 | * above-described special way, and signals that the in-service queue |
| 1107 | * should be expired. Timestamp back-shifting is done later in |
| 1108 | * __bfq_activate_entity. |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1109 | * |
| 1110 | * 2. Reduce latency. Even if timestamps are not backshifted to let |
| 1111 | * the process associated with bfqq recover a service hole, bfqq may |
| 1112 | * however happen to have, after being (re)activated, a lower finish |
| 1113 | * timestamp than the in-service queue. That is, the next budget of |
| 1114 | * bfqq may have to be completed before the one of the in-service |
| 1115 | * queue. If this is the case, then preempting the in-service queue |
| 1116 | * allows this goal to be achieved, apart from the unpreemptible, |
| 1117 | * outstanding requests mentioned above. |
| 1118 | * |
| 1119 | * Unfortunately, regardless of which of the above two goals one wants |
| 1120 | * to achieve, service trees need first to be updated to know whether |
| 1121 | * the in-service queue must be preempted. To have service trees |
| 1122 | * correctly updated, the in-service queue must be expired and |
| 1123 | * rescheduled, and bfqq must be scheduled too. This is one of the |
| 1124 | * most costly operations (in future versions, the scheduling |
| 1125 | * mechanism may be re-designed in such a way to make it possible to |
| 1126 | * know whether preemption is needed without needing to update service |
| 1127 | * trees). In addition, queue preemptions almost always cause random |
| 1128 | * I/O, and thus loss of throughput. Because of these facts, the next |
| 1129 | * function adopts the following simple scheme to avoid both costly |
| 1130 | * operations and too frequent preemptions: it requests the expiration |
| 1131 | * of the in-service queue (unconditionally) only for queues that need |
| 1132 | * to recover a hole, or that either are weight-raised or deserve to |
| 1133 | * be weight-raised. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1134 | */ |
| 1135 | static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, |
| 1136 | struct bfq_queue *bfqq, |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1137 | bool arrived_in_time, |
| 1138 | bool wr_or_deserves_wr) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1139 | { |
| 1140 | struct bfq_entity *entity = &bfqq->entity; |
| 1141 | |
| 1142 | if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) { |
| 1143 | /* |
| 1144 | * We do not clear the flag non_blocking_wait_rq here, as |
| 1145 | * the latter is used in bfq_activate_bfqq to signal |
| 1146 | * that timestamps need to be back-shifted (and is |
| 1147 | * cleared right after). |
| 1148 | */ |
| 1149 | |
| 1150 | /* |
| 1151 | * In the next assignment we rely on the fact that neither |
| 1152 | * entity->service nor entity->budget is updated |
| 1153 | * on expiration if bfqq is empty (see |
| 1154 | * __bfq_bfqq_recalc_budget). Thus both quantities |
| 1155 | * remain unchanged after such an expiration, and the |
| 1156 | * following statement therefore assigns to |
| 1157 | * entity->budget the remaining budget on such an |
| 1158 | * expiration. For clarity, entity->service is not |
| 1159 | * updated on expiration in any case, and, in normal |
| 1160 | * operation, is reset only when bfqq is selected for |
| 1161 | * service (see bfq_get_next_queue). |
| 1162 | */ |
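|      | /* |
|      | * Illustrative instance (hypothetical figures): if bfqq was |
|      | * expired with 2048 sectors of its budget still unused and |
|      | * max_budget is larger than that, the next budget is set to |
|      | * 2048 sectors, i.e., exactly the hole left to recover. |
|      | */ |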
| 1163 | entity->budget = min_t(unsigned long, |
| 1164 | bfq_bfqq_budget_left(bfqq), |
| 1165 | bfqq->max_budget); |
| 1166 | |
| 1167 | return true; |
| 1168 | } |
| 1169 | |
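|      | /* |
|      | * Not a candidate for hole recovery: grant a full budget, |
|      | * enlarged, if needed, so that at least next_rq fits in it. |
|      | */ |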
| 1170 | entity->budget = max_t(unsigned long, bfqq->max_budget, |
| 1171 | bfq_serv_to_charge(bfqq->next_rq, bfqq)); |
| 1172 | bfq_clear_bfqq_non_blocking_wait_rq(bfqq); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1173 | return wr_or_deserves_wr; |
| 1174 | } |
| 1175 | |
| 1176 | static unsigned int bfq_wr_duration(struct bfq_data *bfqd) |
| 1177 | { |
| 1178 | u64 dur; |
| 1179 | |
| 1180 | if (bfqd->bfq_wr_max_time > 0) |
| 1181 | return bfqd->bfq_wr_max_time; |
| 1182 | |
| 1183 | dur = bfqd->RT_prod; |
| 1184 | do_div(dur, bfqd->peak_rate); |
| 1185 | |
| 1186 | /* |
| 1187 | * Limit duration between 3 and 13 seconds. Tests show that |
| 1188 | * values higher than 13 seconds often yield the opposite of |
| 1189 | * the desired result, i.e., worsen responsiveness by letting |
| 1190 | * non-interactive and non-soft-real-time applications |
| 1191 | * preserve weight-raising for too long a time interval. |
| 1192 | * |
| 1193 | * On the other hand, values lower than 3 seconds make it |
| 1194 | * difficult for most interactive tasks to complete their jobs |
| 1195 | * before weight-raising finishes. |
| 1196 | */ |
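|      | /* |
|      | * Illustrative example (hypothetical figures): if RT_prod |
|      | * corresponds to about 7 seconds of service on the reference |
|      | * device, a device whose measured peak_rate is twice the |
|      | * reference rate gets dur ~= 3.5 s, already inside the window |
|      | * below, while a device ten times faster would be clamped to |
|      | * the 3 s minimum. |
|      | */ |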
| 1197 | if (dur > msecs_to_jiffies(13000)) |
| 1198 | dur = msecs_to_jiffies(13000); |
| 1199 | else if (dur < msecs_to_jiffies(3000)) |
| 1200 | dur = msecs_to_jiffies(3000); |
| 1201 | |
| 1202 | return dur; |
| 1203 | } |
| 1204 | |
| 1205 | static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, |
| 1206 | struct bfq_queue *bfqq, |
| 1207 | unsigned int old_wr_coeff, |
| 1208 | bool wr_or_deserves_wr, |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1209 | bool interactive, |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1210 | bool in_burst, |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1211 | bool soft_rt) |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1212 | { |
| 1213 | if (old_wr_coeff == 1 && wr_or_deserves_wr) { |
| 1214 | /* start a weight-raising period */ |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1215 | if (interactive) { |
| 1216 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
| 1217 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
| 1218 | } else { |
| 1219 | bfqq->wr_start_at_switch_to_srt = jiffies; |
| 1220 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * |
| 1221 | BFQ_SOFTRT_WEIGHT_FACTOR; |
| 1222 | bfqq->wr_cur_max_time = |
| 1223 | bfqd->bfq_wr_rt_max_time; |
| 1224 | } |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1225 | |
| 1226 | /* |
| 1227 | * If needed, further reduce budget to make sure it is |
| 1228 | * close to bfqq's backlog, so as to reduce the |
| 1229 | * scheduling-error component due to a too large |
| 1230 | * budget. Do not care about throughput consequences, |
| 1231 | * but only about latency. Finally, do not assign a |
| 1232 | * too small budget either, to avoid increasing |
| 1233 | * latency by causing too frequent expirations. |
| 1234 | */ |
| 1235 | bfqq->entity.budget = min_t(unsigned long, |
| 1236 | bfqq->entity.budget, |
| 1237 | 2 * bfq_min_budget(bfqd)); |
| 1238 | } else if (old_wr_coeff > 1) { |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1239 | if (interactive) { /* update wr coeff and duration */ |
| 1240 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
| 1241 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1242 | } else if (in_burst) |
| 1243 | bfqq->wr_coeff = 1; |
| 1244 | else if (soft_rt) { |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1245 | /* |
| 1246 | * The application is now or still meeting the |
| 1247 | * requirements for being deemed soft rt. We |
| 1248 | * can then correctly and safely (re)charge |
| 1249 | * the weight-raising duration for the |
| 1250 | * application with the weight-raising |
| 1251 | * duration for soft rt applications. |
| 1252 | * |
| 1253 | * In particular, doing this recharge now, i.e., |
| 1254 | * before the weight-raising period for the |
| 1255 | * application finishes, reduces the probability |
| 1256 | * of the following negative scenario: |
| 1257 | * 1) the weight of a soft rt application is |
| 1258 | * raised at startup (as for any newly |
| 1259 | * created application), |
| 1260 | * 2) since the application is not interactive, |
| 1261 | * at a certain time weight-raising is |
| 1262 | * stopped for the application, |
| 1263 | * 3) at that time the application happens to |
| 1264 | * still have pending requests, and hence |
| 1265 | * is destined to not have a chance to be |
| 1266 | * deemed soft rt before these requests are |
| 1267 | * completed (see the comments to the |
| 1268 | * function bfq_bfqq_softrt_next_start() |
| 1269 | * for details on soft rt detection), |
| 1270 | * 4) these pending requests experience a high |
| 1271 | * latency because the application is not |
| 1272 | * weight-raised while they are pending. |
| 1273 | */ |
| 1274 | if (bfqq->wr_cur_max_time != |
| 1275 | bfqd->bfq_wr_rt_max_time) { |
| 1276 | bfqq->wr_start_at_switch_to_srt = |
| 1277 | bfqq->last_wr_start_finish; |
| 1278 | |
| 1279 | bfqq->wr_cur_max_time = |
| 1280 | bfqd->bfq_wr_rt_max_time; |
| 1281 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * |
| 1282 | BFQ_SOFTRT_WEIGHT_FACTOR; |
| 1283 | } |
| 1284 | bfqq->last_wr_start_finish = jiffies; |
| 1285 | } |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1286 | } |
| 1287 | } |
| 1288 | |
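|      | /* |
|      | * bfqq is deemed to have been idle for a long time if it has no |
|      | * request in flight and its budget timeout elapsed more than |
|      | * bfq_wr_min_idle_time ago. |
|      | */ |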
| 1289 | static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, |
| 1290 | struct bfq_queue *bfqq) |
| 1291 | { |
| 1292 | return bfqq->dispatched == 0 && |
| 1293 | time_is_before_jiffies( |
| 1294 | bfqq->budget_timeout + |
| 1295 | bfqd->bfq_wr_min_idle_time); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1296 | } |
| 1297 | |
| 1298 | static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, |
| 1299 | struct bfq_queue *bfqq, |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1300 | int old_wr_coeff, |
| 1301 | struct request *rq, |
| 1302 | bool *interactive) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1303 | { |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1304 | bool soft_rt, in_burst, wr_or_deserves_wr, |
| 1305 | bfqq_wants_to_preempt, |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1306 | idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1307 | /* |
| 1308 | * See the comments on |
| 1309 | * bfq_bfqq_update_budg_for_activation for |
| 1310 | * details on the usage of the next variable. |
| 1311 | */ |
| 1312 | arrived_in_time = ktime_get_ns() <= |
| 1313 | bfqq->ttime.last_end_request + |
| 1314 | bfqd->bfq_slice_idle * 3; |
| 1315 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 1316 | bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags); |
| 1317 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1318 | /* |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1319 | * bfqq deserves to be weight-raised if: |
| 1320 | * - it is sync, |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1321 | * - it does not belong to a large burst, |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1322 | * - it has been idle for enough time or is soft real-time, |
| 1323 | * - it is linked to a bfq_io_cq (it is not shared in any sense). |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1324 | */ |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1325 | in_burst = bfq_bfqq_in_large_burst(bfqq); |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1326 | soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1327 | !in_burst && |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1328 | time_is_before_jiffies(bfqq->soft_rt_next_start); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1329 | *interactive = !in_burst && idle_for_long_time; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1330 | wr_or_deserves_wr = bfqd->low_latency && |
| 1331 | (bfqq->wr_coeff > 1 || |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1332 | (bfq_bfqq_sync(bfqq) && |
| 1333 | bfqq->bic && (*interactive || soft_rt))); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1334 | |
| 1335 | /* |
| 1336 | * Using the last flag, update budget and check whether bfqq |
| 1337 | * may want to preempt the in-service queue. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1338 | */ |
| 1339 | bfqq_wants_to_preempt = |
| 1340 | bfq_bfqq_update_budg_for_activation(bfqd, bfqq, |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1341 | arrived_in_time, |
| 1342 | wr_or_deserves_wr); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1343 | |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1344 | /* |
| 1345 | * If bfqq happened to be activated in a burst, but has been |
| 1346 | * idle for much longer than an interactive queue would be, then we |
| 1347 | * assume that, in the overall I/O initiated in the burst, the |
| 1348 | * I/O associated with bfqq is finished. So bfqq does not need |
| 1349 | * to be treated as a queue belonging to a burst |
| 1350 | * anymore. Accordingly, we reset bfqq's in_large_burst flag |
| 1351 | * if set, and remove bfqq from the burst list if it's |
| 1352 | * there. We do not decrement burst_size, because the fact |
| 1353 | * that bfqq does not need to belong to the burst list any |
| 1354 | * more does not invalidate the fact that bfqq was created in |
| 1355 | * a burst. |
| 1356 | */ |
| 1357 | if (likely(!bfq_bfqq_just_created(bfqq)) && |
| 1358 | idle_for_long_time && |
| 1359 | time_is_before_jiffies( |
| 1360 | bfqq->budget_timeout + |
| 1361 | msecs_to_jiffies(10000))) { |
| 1362 | hlist_del_init(&bfqq->burst_list_node); |
| 1363 | bfq_clear_bfqq_in_large_burst(bfqq); |
| 1364 | } |
| 1365 | |
| 1366 | bfq_clear_bfqq_just_created(bfqq); |
| 1367 | |
| 1368 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1369 | if (!bfq_bfqq_IO_bound(bfqq)) { |
| 1370 | if (arrived_in_time) { |
| 1371 | bfqq->requests_within_timer++; |
| 1372 | if (bfqq->requests_within_timer >= |
| 1373 | bfqd->bfq_requests_within_timer) |
| 1374 | bfq_mark_bfqq_IO_bound(bfqq); |
| 1375 | } else |
| 1376 | bfqq->requests_within_timer = 0; |
| 1377 | } |
| 1378 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1379 | if (bfqd->low_latency) { |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1380 | if (unlikely(time_is_after_jiffies(bfqq->split_time))) |
| 1381 | /* wraparound */ |
| 1382 | bfqq->split_time = |
| 1383 | jiffies - bfqd->bfq_wr_min_idle_time - 1; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1384 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1385 | if (time_is_before_jiffies(bfqq->split_time + |
| 1386 | bfqd->bfq_wr_min_idle_time)) { |
| 1387 | bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, |
| 1388 | old_wr_coeff, |
| 1389 | wr_or_deserves_wr, |
| 1390 | *interactive, |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 1391 | in_burst, |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1392 | soft_rt); |
| 1393 | |
| 1394 | if (old_wr_coeff != bfqq->wr_coeff) |
| 1395 | bfqq->entity.prio_changed = 1; |
| 1396 | } |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1397 | } |
| 1398 | |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1399 | bfqq->last_idle_bklogged = jiffies; |
| 1400 | bfqq->service_from_backlogged = 0; |
| 1401 | bfq_clear_bfqq_softrt_update(bfqq); |
| 1402 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1403 | bfq_add_bfqq_busy(bfqd, bfqq); |
| 1404 | |
| 1405 | /* |
| 1406 | * Expire in-service queue only if preemption may be needed |
| 1407 | * for guarantees. In this respect, the function |
| 1408 | * next_queue_may_preempt just checks a simple, necessary |
| 1409 | * condition, and not a sufficient condition based on |
| 1410 | * timestamps. In fact, for the latter condition to be |
| 1411 | * evaluated, timestamps would need first to be updated, and |
| 1412 | * this operation is quite costly (see the comments on the |
| 1413 | * function bfq_bfqq_update_budg_for_activation). |
| 1414 | */ |
| 1415 | if (bfqd->in_service_queue && bfqq_wants_to_preempt && |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1416 | bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1417 | next_queue_may_preempt(bfqd)) |
| 1418 | bfq_bfqq_expire(bfqd, bfqd->in_service_queue, |
| 1419 | false, BFQQE_PREEMPTED); |
| 1420 | } |
| 1421 | |
| 1422 | static void bfq_add_request(struct request *rq) |
| 1423 | { |
| 1424 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
| 1425 | struct bfq_data *bfqd = bfqq->bfqd; |
| 1426 | struct request *next_rq, *prev; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1427 | unsigned int old_wr_coeff = bfqq->wr_coeff; |
| 1428 | bool interactive = false; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1429 | |
| 1430 | bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); |
| 1431 | bfqq->queued[rq_is_sync(rq)]++; |
| 1432 | bfqd->queued++; |
| 1433 | |
| 1434 | elv_rb_add(&bfqq->sort_list, rq); |
| 1435 | |
| 1436 | /* |
| 1437 | * Check if this request is a better next-serve candidate. |
| 1438 | */ |
| 1439 | prev = bfqq->next_rq; |
| 1440 | next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); |
| 1441 | bfqq->next_rq = next_rq; |
| 1442 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1443 | /* |
| 1444 | * Adjust priority tree position, if next_rq changes. |
| 1445 | */ |
| 1446 | if (prev != bfqq->next_rq) |
| 1447 | bfq_pos_tree_add_move(bfqd, bfqq); |
| 1448 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1449 | if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */ |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1450 | bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, |
| 1451 | rq, &interactive); |
| 1452 | else { |
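|      | /* |
|      | * bfqq is already busy. Possibly start a weight-raising |
|      | * period for this queue if it is async, not yet |
|      | * weight-raised, and its previous request arrived long |
|      | * enough ago (see the comment on last_wr_start_finish |
|      | * below). |
|      | */ |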
| 1453 | if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && |
| 1454 | time_is_before_jiffies( |
| 1455 | bfqq->last_wr_start_finish + |
| 1456 | bfqd->bfq_wr_min_inter_arr_async)) { |
| 1457 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
| 1458 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
| 1459 | |
Paolo Valente | cfd6971 | 2017-04-12 18:23:15 +0200 | [diff] [blame] | 1460 | bfqd->wr_busy_queues++; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1461 | bfqq->entity.prio_changed = 1; |
| 1462 | } |
| 1463 | if (prev != bfqq->next_rq) |
| 1464 | bfq_updated_next_req(bfqd, bfqq); |
| 1465 | } |
| 1466 | |
| 1467 | /* |
| 1468 | * Assign jiffies to last_wr_start_finish in the following |
| 1469 | * cases: |
| 1470 | * |
| 1471 | * . if bfqq is not going to be weight-raised, because, for |
| 1472 | * non weight-raised queues, last_wr_start_finish stores the |
| 1473 | * arrival time of the last request; as of now, this piece |
| 1474 | * of information is used only for deciding whether to |
| 1475 | * weight-raise async queues |
| 1476 | * |
| 1477 | * . if bfqq is not weight-raised, because, if bfqq is now |
| 1478 | * switching to weight-raised, then last_wr_start_finish |
| 1479 | * stores the time when weight-raising starts |
| 1480 | * |
| 1481 | * . if bfqq is interactive, because, regardless of whether |
| 1482 | * bfqq is currently weight-raised, the weight-raising |
| 1483 | * period must start or restart (this case is considered |
| 1484 | * separately because it is not detected by the above |
| 1485 | * conditions, if bfqq is already weight-raised) |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1486 | * |
| 1487 | * last_wr_start_finish has to be updated also if bfqq is soft |
| 1488 | * real-time, because the weight-raising period is constantly |
| 1489 | * restarted on idle-to-busy transitions for these queues, but |
| 1490 | * this is already done in bfq_bfqq_handle_idle_busy_switch if |
| 1491 | * needed. |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1492 | */ |
| 1493 | if (bfqd->low_latency && |
| 1494 | (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) |
| 1495 | bfqq->last_wr_start_finish = jiffies; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1496 | } |
| 1497 | |
| 1498 | static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, |
| 1499 | struct bio *bio, |
| 1500 | struct request_queue *q) |
| 1501 | { |
| 1502 | struct bfq_queue *bfqq = bfqd->bio_bfqq; |
| 1503 | |
| 1504 | |
| 1505 | if (bfqq) |
| 1506 | return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); |
| 1507 | |
| 1508 | return NULL; |
| 1509 | } |
| 1510 | |
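|      | /* |
|      | * Distance, in sectors, between the position of the last |
|      | * dispatched request and the position of rq; 0 if no request |
|      | * has been dispatched yet (last_pos == 0). |
|      | */ |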
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 1511 | static sector_t get_sdist(sector_t last_pos, struct request *rq) |
| 1512 | { |
| 1513 | if (last_pos) |
| 1514 | return abs(blk_rq_pos(rq) - last_pos); |
| 1515 | |
| 1516 | return 0; |
| 1517 | } |
| 1518 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1519 | #if 0 /* Still not clear if we can do without next two functions */ |
| 1520 | static void bfq_activate_request(struct request_queue *q, struct request *rq) |
| 1521 | { |
| 1522 | struct bfq_data *bfqd = q->elevator->elevator_data; |
| 1523 | |
| 1524 | bfqd->rq_in_driver++; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1525 | } |
| 1526 | |
| 1527 | static void bfq_deactivate_request(struct request_queue *q, struct request *rq) |
| 1528 | { |
| 1529 | struct bfq_data *bfqd = q->elevator->elevator_data; |
| 1530 | |
| 1531 | bfqd->rq_in_driver--; |
| 1532 | } |
| 1533 | #endif |
| 1534 | |
| 1535 | static void bfq_remove_request(struct request_queue *q, |
| 1536 | struct request *rq) |
| 1537 | { |
| 1538 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
| 1539 | struct bfq_data *bfqd = bfqq->bfqd; |
| 1540 | const int sync = rq_is_sync(rq); |
| 1541 | |
| 1542 | if (bfqq->next_rq == rq) { |
| 1543 | bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); |
| 1544 | bfq_updated_next_req(bfqd, bfqq); |
| 1545 | } |
| 1546 | |
| 1547 | if (rq->queuelist.prev != &rq->queuelist) |
| 1548 | list_del_init(&rq->queuelist); |
| 1549 | bfqq->queued[sync]--; |
| 1550 | bfqd->queued--; |
| 1551 | elv_rb_del(&bfqq->sort_list, rq); |
| 1552 | |
| 1553 | elv_rqhash_del(q, rq); |
| 1554 | if (q->last_merge == rq) |
| 1555 | q->last_merge = NULL; |
| 1556 | |
| 1557 | if (RB_EMPTY_ROOT(&bfqq->sort_list)) { |
| 1558 | bfqq->next_rq = NULL; |
| 1559 | |
| 1560 | if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 1561 | bfq_del_bfqq_busy(bfqd, bfqq, false); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1562 | /* |
| 1563 | * bfqq emptied. In normal operation, when |
| 1564 | * bfqq is empty, bfqq->entity.service and |
| 1565 | * bfqq->entity.budget must contain, |
| 1566 | * respectively, the service received and the |
| 1567 | * budget used last time bfqq emptied. These |
| 1568 | * facts do not hold in this case, as at least |
| 1569 | * this last removal occurred while bfqq is |
| 1570 | * not in service. To avoid inconsistencies, |
| 1571 | * reset both bfqq->entity.service and |
| 1572 | * bfqq->entity.budget, if bfqq has still a |
| 1573 | * process that may issue I/O requests to it. |
| 1574 | */ |
| 1575 | bfqq->entity.budget = bfqq->entity.service = 0; |
| 1576 | } |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1577 | |
| 1578 | /* |
| 1579 | * Remove queue from request-position tree as it is empty. |
| 1580 | */ |
| 1581 | if (bfqq->pos_root) { |
| 1582 | rb_erase(&bfqq->pos_node, bfqq->pos_root); |
| 1583 | bfqq->pos_root = NULL; |
| 1584 | } |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1585 | } |
| 1586 | |
| 1587 | if (rq->cmd_flags & REQ_META) |
| 1588 | bfqq->meta_pending--; |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 1589 | |
| 1590 | bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1591 | } |
| 1592 | |
| 1593 | static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) |
| 1594 | { |
| 1595 | struct request_queue *q = hctx->queue; |
| 1596 | struct bfq_data *bfqd = q->elevator->elevator_data; |
| 1597 | struct request *free = NULL; |
| 1598 | /* |
| 1599 | * bfq_bic_lookup grabs the queue_lock: invoke it now and |
| 1600 | * store its return value for later use, to avoid nesting |
| 1601 | * queue_lock inside the bfqd->lock. We assume that the bic |
| 1602 | * returned by bfq_bic_lookup does not go away before |
| 1603 | * bfqd->lock is taken. |
| 1604 | */ |
| 1605 | struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); |
| 1606 | bool ret; |
| 1607 | |
| 1608 | spin_lock_irq(&bfqd->lock); |
| 1609 | |
| 1610 | if (bic) |
| 1611 | bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); |
| 1612 | else |
| 1613 | bfqd->bio_bfqq = NULL; |
| 1614 | bfqd->bio_bic = bic; |
| 1615 | |
| 1616 | ret = blk_mq_sched_try_merge(q, bio, &free); |
| 1617 | |
| 1618 | if (free) |
| 1619 | blk_mq_free_request(free); |
| 1620 | spin_unlock_irq(&bfqd->lock); |
| 1621 | |
| 1622 | return ret; |
| 1623 | } |
| 1624 | |
| 1625 | static int bfq_request_merge(struct request_queue *q, struct request **req, |
| 1626 | struct bio *bio) |
| 1627 | { |
| 1628 | struct bfq_data *bfqd = q->elevator->elevator_data; |
| 1629 | struct request *__rq; |
| 1630 | |
| 1631 | __rq = bfq_find_rq_fmerge(bfqd, bio, q); |
| 1632 | if (__rq && elv_bio_merge_ok(__rq, bio)) { |
| 1633 | *req = __rq; |
| 1634 | return ELEVATOR_FRONT_MERGE; |
| 1635 | } |
| 1636 | |
| 1637 | return ELEVATOR_NO_MERGE; |
| 1638 | } |
| 1639 | |
| 1640 | static void bfq_request_merged(struct request_queue *q, struct request *req, |
| 1641 | enum elv_merge type) |
| 1642 | { |
| 1643 | if (type == ELEVATOR_FRONT_MERGE && |
| 1644 | rb_prev(&req->rb_node) && |
| 1645 | blk_rq_pos(req) < |
| 1646 | blk_rq_pos(container_of(rb_prev(&req->rb_node), |
| 1647 | struct request, rb_node))) { |
| 1648 | struct bfq_queue *bfqq = RQ_BFQQ(req); |
| 1649 | struct bfq_data *bfqd = bfqq->bfqd; |
| 1650 | struct request *prev, *next_rq; |
| 1651 | |
| 1652 | /* Reposition request in its sort_list */ |
| 1653 | elv_rb_del(&bfqq->sort_list, req); |
| 1654 | elv_rb_add(&bfqq->sort_list, req); |
| 1655 | |
| 1656 | /* Choose next request to be served for bfqq */ |
| 1657 | prev = bfqq->next_rq; |
| 1658 | next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, |
| 1659 | bfqd->last_position); |
| 1660 | bfqq->next_rq = next_rq; |
| 1661 | /* |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1662 | * If next_rq changes, update both the queue's budget to |
| 1663 | * fit the new request and the queue's position in its |
| 1664 | * rq_pos_tree. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1665 | */ |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1666 | if (prev != bfqq->next_rq) { |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1667 | bfq_updated_next_req(bfqd, bfqq); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1668 | bfq_pos_tree_add_move(bfqd, bfqq); |
| 1669 | } |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1670 | } |
| 1671 | } |
| 1672 | |
| 1673 | static void bfq_requests_merged(struct request_queue *q, struct request *rq, |
| 1674 | struct request *next) |
| 1675 | { |
| 1676 | struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next); |
| 1677 | |
| 1678 | if (!RB_EMPTY_NODE(&rq->rb_node)) |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 1679 | goto end; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1680 | spin_lock_irq(&bfqq->bfqd->lock); |
| 1681 | |
| 1682 | /* |
| 1683 | * If next and rq belong to the same bfq_queue and next is older |
| 1684 | * than rq, then reposition rq in the fifo (by substituting next |
| 1685 | * with rq). Otherwise, if next and rq belong to different |
| 1686 | * bfq_queues, never reposition rq: in fact, we would have to |
| 1687 | * reposition it with respect to next's position in its own fifo, |
| 1688 | * which would most certainly be too expensive with respect to |
| 1689 | * the benefits. |
| 1690 | */ |
| 1691 | if (bfqq == next_bfqq && |
| 1692 | !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && |
| 1693 | next->fifo_time < rq->fifo_time) { |
| 1694 | list_del_init(&rq->queuelist); |
| 1695 | list_replace_init(&next->queuelist, &rq->queuelist); |
| 1696 | rq->fifo_time = next->fifo_time; |
| 1697 | } |
| 1698 | |
| 1699 | if (bfqq->next_rq == next) |
| 1700 | bfqq->next_rq = rq; |
| 1701 | |
| 1702 | bfq_remove_request(q, next); |
| 1703 | |
| 1704 | spin_unlock_irq(&bfqq->bfqd->lock); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 1705 | end: |
| 1706 | bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 1707 | } |
| 1708 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1709 | /* Must be called with bfqq != NULL */ |
| 1710 | static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) |
| 1711 | { |
Paolo Valente | cfd6971 | 2017-04-12 18:23:15 +0200 | [diff] [blame] | 1712 | if (bfq_bfqq_busy(bfqq)) |
| 1713 | bfqq->bfqd->wr_busy_queues--; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1714 | bfqq->wr_coeff = 1; |
| 1715 | bfqq->wr_cur_max_time = 0; |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 1716 | bfqq->last_wr_start_finish = jiffies; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1717 | /* |
| 1718 | * Trigger a weight change on the next invocation of |
| 1719 | * __bfq_entity_update_weight_prio. |
| 1720 | */ |
| 1721 | bfqq->entity.prio_changed = 1; |
| 1722 | } |
| 1723 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 1724 | void bfq_end_wr_async_queues(struct bfq_data *bfqd, |
| 1725 | struct bfq_group *bfqg) |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 1726 | { |
| 1727 | int i, j; |
| 1728 | |
| 1729 | for (i = 0; i < 2; i++) |
| 1730 | for (j = 0; j < IOPRIO_BE_NR; j++) |
| 1731 | if (bfqg->async_bfqq[i][j]) |
| 1732 | bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); |
| 1733 | if (bfqg->async_idle_bfqq) |
| 1734 | bfq_bfqq_end_wr(bfqg->async_idle_bfqq); |
| 1735 | } |
| 1736 | |
| 1737 | static void bfq_end_wr(struct bfq_data *bfqd) |
| 1738 | { |
| 1739 | struct bfq_queue *bfqq; |
| 1740 | |
| 1741 | spin_lock_irq(&bfqd->lock); |
| 1742 | |
| 1743 | list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) |
| 1744 | bfq_bfqq_end_wr(bfqq); |
| 1745 | list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) |
| 1746 | bfq_bfqq_end_wr(bfqq); |
| 1747 | bfq_end_wr_async(bfqd); |
| 1748 | |
| 1749 | spin_unlock_irq(&bfqd->lock); |
| 1750 | } |
| 1751 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1752 | static sector_t bfq_io_struct_pos(void *io_struct, bool request) |
| 1753 | { |
| 1754 | if (request) |
| 1755 | return blk_rq_pos(io_struct); |
| 1756 | else |
| 1757 | return ((struct bio *)io_struct)->bi_iter.bi_sector; |
| 1758 | } |
| 1759 | |
| 1760 | static int bfq_rq_close_to_sector(void *io_struct, bool request, |
| 1761 | sector_t sector) |
| 1762 | { |
| 1763 | return abs(bfq_io_struct_pos(io_struct, request) - sector) <= |
| 1764 | BFQQ_CLOSE_THR; |
| 1765 | } |
| 1766 | |
| 1767 | static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, |
| 1768 | struct bfq_queue *bfqq, |
| 1769 | sector_t sector) |
| 1770 | { |
| 1771 | struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; |
| 1772 | struct rb_node *parent, *node; |
| 1773 | struct bfq_queue *__bfqq; |
| 1774 | |
| 1775 | if (RB_EMPTY_ROOT(root)) |
| 1776 | return NULL; |
| 1777 | |
| 1778 | /* |
| 1779 | * First, if we find a request starting at the end of the last |
| 1780 | * request, choose it. |
| 1781 | */ |
| 1782 | __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); |
| 1783 | if (__bfqq) |
| 1784 | return __bfqq; |
| 1785 | |
| 1786 | /* |
| 1787 | * If the exact sector wasn't found, the parent of the NULL leaf |
| 1788 | * will contain the closest sector (rq_pos_tree sorted by |
| 1789 | * next_request position). |
| 1790 | */ |
| 1791 | __bfqq = rb_entry(parent, struct bfq_queue, pos_node); |
| 1792 | if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) |
| 1793 | return __bfqq; |
| 1794 | |
| 1795 | if (blk_rq_pos(__bfqq->next_rq) < sector) |
| 1796 | node = rb_next(&__bfqq->pos_node); |
| 1797 | else |
| 1798 | node = rb_prev(&__bfqq->pos_node); |
| 1799 | if (!node) |
| 1800 | return NULL; |
| 1801 | |
| 1802 | __bfqq = rb_entry(node, struct bfq_queue, pos_node); |
| 1803 | if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) |
| 1804 | return __bfqq; |
| 1805 | |
| 1806 | return NULL; |
| 1807 | } |
| 1808 | |
| 1809 | static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, |
| 1810 | struct bfq_queue *cur_bfqq, |
| 1811 | sector_t sector) |
| 1812 | { |
| 1813 | struct bfq_queue *bfqq; |
| 1814 | |
| 1815 | /* |
| 1816 | * We shall notice if some of the queues are cooperating, |
| 1817 | * e.g., working closely on the same area of the device. In |
| 1818 | * that case, we can group them together and: 1) don't waste |
| 1819 | * time idling, and 2) serve the union of their requests in |
| 1820 | * the best possible order for throughput. |
| 1821 | */ |
| 1822 | bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); |
| 1823 | if (!bfqq || bfqq == cur_bfqq) |
| 1824 | return NULL; |
| 1825 | |
| 1826 | return bfqq; |
| 1827 | } |
| 1828 | |
| 1829 | static struct bfq_queue * |
| 1830 | bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) |
| 1831 | { |
| 1832 | int process_refs, new_process_refs; |
| 1833 | struct bfq_queue *__bfqq; |
| 1834 | |
| 1835 | /* |
| 1836 | * If there are no process references on the new_bfqq, then it is |
| 1837 | * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain |
| 1838 | * may have dropped their last reference (not just their last process |
| 1839 | * reference). |
| 1840 | */ |
| 1841 | if (!bfqq_process_refs(new_bfqq)) |
| 1842 | return NULL; |
| 1843 | |
| 1844 | /* Avoid a circular list and skip interim queue merges. */ |
| 1845 | while ((__bfqq = new_bfqq->new_bfqq)) { |
| 1846 | if (__bfqq == bfqq) |
| 1847 | return NULL; |
| 1848 | new_bfqq = __bfqq; |
| 1849 | } |
| 1850 | |
| 1851 | process_refs = bfqq_process_refs(bfqq); |
| 1852 | new_process_refs = bfqq_process_refs(new_bfqq); |
| 1853 | /* |
| 1854 | * If the process for the bfqq has gone away, there is no |
| 1855 | * sense in merging the queues. |
| 1856 | */ |
| 1857 | if (process_refs == 0 || new_process_refs == 0) |
| 1858 | return NULL; |
| 1859 | |
| 1860 | bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", |
| 1861 | new_bfqq->pid); |
| 1862 | |
| 1863 | /* |
| 1864 | * Merging is just a redirection: the requests of the process |
| 1865 | * owning one of the two queues are redirected to the other queue. |
| 1866 | * The latter queue, in its turn, is set as shared if this is the |
| 1867 | * first time that the requests of some process are redirected to |
| 1868 | * it. |
| 1869 | * |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 1870 | * We redirect bfqq to new_bfqq and not the opposite, because |
| 1871 | * we are in the context of the process owning bfqq, thus we |
| 1872 | * have the io_cq of this process. So we can immediately |
| 1873 | * configure this io_cq to redirect the requests of the |
| 1874 | * process to new_bfqq. In contrast, the io_cq of new_bfqq is |
| 1875 | * not available any more (new_bfqq->bic == NULL). |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1876 | * |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 1877 | * Anyway, even in case new_bfqq coincides with the in-service |
| 1878 | * queue, redirecting requests to the in-service queue is the |
| 1879 | * best option, as we feed the in-service queue with new |
| 1880 | * requests close to the last request served and, by doing so, |
| 1881 | * are likely to increase the throughput. |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1882 | */ |
| 1883 | bfqq->new_bfqq = new_bfqq; |
| 1884 | new_bfqq->ref += process_refs; |
| 1885 | return new_bfqq; |
| 1886 | } |
| 1887 | |
| 1888 | static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, |
| 1889 | struct bfq_queue *new_bfqq) |
| 1890 | { |
| 1891 | if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || |
| 1892 | (bfqq->ioprio_class != new_bfqq->ioprio_class)) |
| 1893 | return false; |
| 1894 | |
| 1895 | /* |
| 1896 | * If either of the queues has already been detected as seeky, |
| 1897 | * then merging it with the other queue is unlikely to lead to |
| 1898 | * sequential I/O. |
| 1899 | */ |
| 1900 | if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) |
| 1901 | return false; |
| 1902 | |
| 1903 | /* |
| 1904 | * Interleaved I/O is known to be done by (some) applications |
| 1905 | * only for reads, so it does not make sense to merge async |
| 1906 | * queues. |
| 1907 | */ |
| 1908 | if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) |
| 1909 | return false; |
| 1910 | |
| 1911 | return true; |
| 1912 | } |
| 1913 | |
| 1914 | /* |
| 1915 | * If this function returns true, then bfqq cannot be merged. The idea |
| 1916 | * is that true cooperation happens very early after processes start |
| 1917 | * to do I/O. Usually, late cooperations are just accidental false |
| 1918 | * positives. In case bfqq is weight-raised, such false positives |
| 1919 | * would evidently degrade latency guarantees for bfqq. |
| 1920 | */ |
| 1921 | static bool wr_from_too_long(struct bfq_queue *bfqq) |
| 1922 | { |
| 1923 | return bfqq->wr_coeff > 1 && |
| 1924 | time_is_before_jiffies(bfqq->last_wr_start_finish + |
| 1925 | msecs_to_jiffies(100)); |
| 1926 | } |
| 1927 | |
| 1928 | /* |
| 1929 | * Attempt to schedule a merge of bfqq with the currently in-service |
| 1930 | * queue or with a close queue among the scheduled queues. Return |
| 1931 | * NULL if no merge was scheduled, a pointer to the shared bfq_queue |
| 1932 | * structure otherwise. |
| 1933 | * |
| 1934 | * The OOM queue is not allowed to participate in cooperation: in fact, since |
| 1935 | * the requests temporarily redirected to the OOM queue could be redirected |
| 1936 | * again to dedicated queues at any time, the state needed to correctly |
| 1937 | * handle merging with the OOM queue would be quite complex and expensive |
| 1938 | * to maintain. Besides, in such a critical condition as an out of memory, |
| 1939 | * the benefits of queue merging may be little relevant, or even negligible. |
| 1940 | * |
| 1941 | * Weight-raised queues can be merged only if their weight-raising |
| 1942 | * period has just started. In fact cooperating processes are usually |
| 1943 | * started together. Thus, with this filter we avoid false positives |
| 1944 | * that would jeopardize low-latency guarantees. |
| 1945 | * |
| 1946 | * WARNING: queue merging may impair fairness among non-weight raised |
| 1947 | * queues, for at least two reasons: 1) the original weight of a |
| 1948 | * merged queue may change during the merged state, 2) even if the |
| 1949 | * weight stays the same, a merged queue may be bloated with many more |
| 1950 | * requests than the ones produced by its originally-associated |
| 1951 | * process. |
| 1952 | */ |
| 1953 | static struct bfq_queue * |
| 1954 | bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 1955 | void *io_struct, bool request) |
| 1956 | { |
| 1957 | struct bfq_queue *in_service_bfqq, *new_bfqq; |
| 1958 | |
| 1959 | if (bfqq->new_bfqq) |
| 1960 | return bfqq->new_bfqq; |
| 1961 | |
| 1962 | if (!io_struct || |
| 1963 | wr_from_too_long(bfqq) || |
| 1964 | unlikely(bfqq == &bfqd->oom_bfqq)) |
| 1965 | return NULL; |
| 1966 | |
| 1967 | /* If there is only one backlogged queue, don't search. */ |
| 1968 | if (bfqd->busy_queues == 1) |
| 1969 | return NULL; |
| 1970 | |
| 1971 | in_service_bfqq = bfqd->in_service_queue; |
| 1972 | |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 1973 | if (!in_service_bfqq || in_service_bfqq == bfqq |
| 1974 | || wr_from_too_long(in_service_bfqq) || |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 1975 | unlikely(in_service_bfqq == &bfqd->oom_bfqq)) |
| 1976 | goto check_scheduled; |
| 1977 | |
| 1978 | if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && |
| 1979 | bfqq->entity.parent == in_service_bfqq->entity.parent && |
| 1980 | bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { |
| 1981 | new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); |
| 1982 | if (new_bfqq) |
| 1983 | return new_bfqq; |
| 1984 | } |
| 1985 | /* |
| 1986 | * Check whether there is a cooperator among currently scheduled |
| 1987 | * queues. The only thing we need is that the bio/request is not |
| 1988 | * NULL, as we need it to establish whether a cooperator exists. |
| 1989 | */ |
| 1990 | check_scheduled: |
| 1991 | new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, |
| 1992 | bfq_io_struct_pos(io_struct, request)); |
| 1993 | |
| 1994 | if (new_bfqq && !wr_from_too_long(new_bfqq) && |
| 1995 | likely(new_bfqq != &bfqd->oom_bfqq) && |
| 1996 | bfq_may_be_close_cooperator(bfqq, new_bfqq)) |
| 1997 | return bfq_setup_merge(bfqq, new_bfqq); |
| 1998 | |
| 1999 | return NULL; |
| 2000 | } |
| 2001 | |
| 2002 | static void bfq_bfqq_save_state(struct bfq_queue *bfqq) |
| 2003 | { |
| 2004 | struct bfq_io_cq *bic = bfqq->bic; |
| 2005 | |
| 2006 | /* |
| 2007 | * If !bfqq->bic, the queue is already shared or its requests |
| 2008 | * have already been redirected to a shared queue; both idle window |
| 2009 | * and weight raising state have already been saved. Do nothing. |
| 2010 | */ |
| 2011 | if (!bic) |
| 2012 | return; |
| 2013 | |
| 2014 | bic->saved_ttime = bfqq->ttime; |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 2015 | bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2016 | bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 2017 | bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); |
| 2018 | bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2019 | bic->saved_wr_coeff = bfqq->wr_coeff; |
| 2020 | bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; |
| 2021 | bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; |
| 2022 | bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; |
| 2023 | } |
| 2024 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2025 | static void |
| 2026 | bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, |
| 2027 | struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) |
| 2028 | { |
| 2029 | bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", |
| 2030 | (unsigned long)new_bfqq->pid); |
| 2031 | /* Save weight raising and idle window of the merged queues */ |
| 2032 | bfq_bfqq_save_state(bfqq); |
| 2033 | bfq_bfqq_save_state(new_bfqq); |
| 2034 | if (bfq_bfqq_IO_bound(bfqq)) |
| 2035 | bfq_mark_bfqq_IO_bound(new_bfqq); |
| 2036 | bfq_clear_bfqq_IO_bound(bfqq); |
| 2037 | |
| 2038 | /* |
| 2039 | * If bfqq is weight-raised, then let new_bfqq inherit |
| 2040 | * weight-raising. To reduce false positives, neglect the case |
| 2041 | * where bfqq has just been created, but has not yet made it |
| 2042 | * to be weight-raised (which may happen because EQM may merge |
| 2043 | * bfqq even before bfq_add_request is executed for the first |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 2044 | * time for bfqq). Handling this case would however be very |
| 2045 | * easy, thanks to the flag just_created. |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2046 | */ |
| 2047 | if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { |
| 2048 | new_bfqq->wr_coeff = bfqq->wr_coeff; |
| 2049 | new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; |
| 2050 | new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; |
| 2051 | new_bfqq->wr_start_at_switch_to_srt = |
| 2052 | bfqq->wr_start_at_switch_to_srt; |
| 2053 | if (bfq_bfqq_busy(new_bfqq)) |
| 2054 | bfqd->wr_busy_queues++; |
| 2055 | new_bfqq->entity.prio_changed = 1; |
| 2056 | } |
| 2057 | |
| 2058 | if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ |
| 2059 | bfqq->wr_coeff = 1; |
| 2060 | bfqq->entity.prio_changed = 1; |
| 2061 | if (bfq_bfqq_busy(bfqq)) |
| 2062 | bfqd->wr_busy_queues--; |
| 2063 | } |
| 2064 | |
| 2065 | bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", |
| 2066 | bfqd->wr_busy_queues); |
| 2067 | |
| 2068 | /* |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2069 | * Merge queues (that is, let bic redirect its requests to new_bfqq) |
| 2070 | */ |
| 2071 | bic_set_bfqq(bic, new_bfqq, 1); |
| 2072 | bfq_mark_bfqq_coop(new_bfqq); |
| 2073 | /* |
| 2074 | * new_bfqq now belongs to at least two bics (it is a shared queue): |
| 2075 | * set new_bfqq->bic to NULL. bfqq either: |
| 2076 | * - does not belong to any bic any more, and hence bfqq->bic must |
| 2077 | * be set to NULL, or |
| 2078 | * - is a queue whose owning bics have already been redirected to a |
| 2079 | * different queue, hence the queue is destined to not belong to |
| 2080 | * any bic soon and bfqq->bic is already NULL (therefore the next |
| 2081 | * assignment causes no harm). |
| 2082 | */ |
| 2083 | new_bfqq->bic = NULL; |
| 2084 | bfqq->bic = NULL; |
| 2085 | /* release process reference to bfqq */ |
| 2086 | bfq_put_queue(bfqq); |
| 2087 | } |
| 2088 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2089 | static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, |
| 2090 | struct bio *bio) |
| 2091 | { |
| 2092 | struct bfq_data *bfqd = q->elevator->elevator_data; |
| 2093 | bool is_sync = op_is_sync(bio->bi_opf); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2094 | struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2095 | |
| 2096 | /* |
| 2097 | * Disallow merge of a sync bio into an async request. |
| 2098 | */ |
| 2099 | if (is_sync && !rq_is_sync(rq)) |
| 2100 | return false; |
| 2101 | |
| 2102 | /* |
| 2103 | * Lookup the bfqq that this bio will be queued with. Allow |
| 2104 | * merge only if rq is queued there. |
| 2105 | */ |
| 2106 | if (!bfqq) |
| 2107 | return false; |
| 2108 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2109 | /* |
| 2110 | * We take advantage of this function to perform an early merge |
| 2111 | * of the queues of possible cooperating processes. |
| 2112 | */ |
| 2113 | new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); |
| 2114 | if (new_bfqq) { |
| 2115 | /* |
| 2116 | * bic still points to bfqq, which means it has not yet been |
| 2117 | * redirected to some other bfq_queue, and a queue |
| 2118 | * merge between bfqq and new_bfqq can be safely |
| 2119 | * fulfilled, i.e., bic can be redirected to new_bfqq |
| 2120 | * and bfqq can be put. |
| 2121 | */ |
| 2122 | bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, |
| 2123 | new_bfqq); |
| 2124 | /* |
| 2125 | * If we get here, bio will be queued into new_queue, |
| 2126 | * so use new_bfqq to decide whether bio and rq can be |
| 2127 | * merged. |
| 2128 | */ |
| 2129 | bfqq = new_bfqq; |
| 2130 | |
| 2131 | /* |
| 2132 | * Also change bfqd->bio_bfqq, as |
| 2133 | * bfqd->bio_bic now points to new_bfqq, and |
| 2134 | * this function may be invoked again (and then may |
| 2135 | * use again bfqd->bio_bfqq). |
| 2136 | */ |
| 2137 | bfqd->bio_bfqq = bfqq; |
| 2138 | } |
| 2139 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2140 | return bfqq == RQ_BFQQ(rq); |
| 2141 | } |
| 2142 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2143 | /* |
| 2144 | * Set the maximum time for the in-service queue to consume its |
| 2145 | * budget. This prevents seeky processes from lowering the throughput. |
| 2146 | * In practice, a time-slice service scheme is used with seeky |
| 2147 | * processes. |
| 2148 | */ |
| 2149 | static void bfq_set_budget_timeout(struct bfq_data *bfqd, |
| 2150 | struct bfq_queue *bfqq) |
| 2151 | { |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 2152 | unsigned int timeout_coeff; |
| 2153 | |
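|      | /* |
|      | * Scale the budget timeout by weight/orig_weight, i.e., by the |
|      | * weight-raising coefficient, except for soft real-time queues, |
|      | * which keep the base timeout. |
|      | */ |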
| 2154 | if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) |
| 2155 | timeout_coeff = 1; |
| 2156 | else |
| 2157 | timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; |
| 2158 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2159 | bfqd->last_budget_start = ktime_get(); |
| 2160 | |
| 2161 | bfqq->budget_timeout = jiffies + |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 2162 | bfqd->bfq_timeout * timeout_coeff; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2163 | } |
| 2164 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2165 | static void __bfq_set_in_service_queue(struct bfq_data *bfqd, |
| 2166 | struct bfq_queue *bfqq) |
| 2167 | { |
| 2168 | if (bfqq) { |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 2169 | bfqg_stats_update_avg_queue_size(bfqq_group(bfqq)); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2170 | bfq_clear_bfqq_fifo_expire(bfqq); |
| 2171 | |
| 2172 | bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; |
| 2173 | |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 2174 | if (time_is_before_jiffies(bfqq->last_wr_start_finish) && |
| 2175 | bfqq->wr_coeff > 1 && |
| 2176 | bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && |
| 2177 | time_is_before_jiffies(bfqq->budget_timeout)) { |
| 2178 | /* |
| 2179 | * For soft real-time queues, move the start |
| 2180 | * of the weight-raising period forward by the |
| 2181 | * time the queue has not received any |
| 2182 | * service. Otherwise, a relatively long |
| 2183 | * service delay is likely to cause the |
| 2184 | * weight-raising period of the queue to end, |
| 2185 | * because of the short duration of the |
| 2186 | * weight-raising period of a soft real-time |
| 2187 | * queue. It is worth noting that this move |
| 2188 | * is not so dangerous for the other queues, |
| 2189 | * because soft real-time queues are not |
| 2190 | * greedy. |
| 2191 | * |
| 2192 | * To not add a further variable, we use the |
| 2193 | * overloaded field budget_timeout to |
| 2194 | * determine for how long the queue has not |
| 2195 | * received service, i.e., how much time has |
| 2196 | * elapsed since the queue expired. However, |
| 2197 | * this is a little imprecise, because |
| 2198 | * budget_timeout is set to jiffies if bfqq |
| 2199 | * not only expires, but also remains with no |
| 2200 | * request. |
| 2201 | */ |
| 2202 | if (time_after(bfqq->budget_timeout, |
| 2203 | bfqq->last_wr_start_finish)) |
| 2204 | bfqq->last_wr_start_finish += |
| 2205 | jiffies - bfqq->budget_timeout; |
| 2206 | else |
| 2207 | bfqq->last_wr_start_finish = jiffies; |
| 2208 | } |
| 2209 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2210 | bfq_set_budget_timeout(bfqd, bfqq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2211 | bfq_log_bfqq(bfqd, bfqq, |
| 2212 | "set_in_service_queue, cur-budget = %d", |
| 2213 | bfqq->entity.budget); |
| 2214 | } |
| 2215 | |
| 2216 | bfqd->in_service_queue = bfqq; |
| 2217 | } |
| 2218 | |
| 2219 | /* |
| 2220 | * Get and set a new queue for service. |
| 2221 | */ |
| 2222 | static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) |
| 2223 | { |
| 2224 | struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); |
| 2225 | |
| 2226 | __bfq_set_in_service_queue(bfqd, bfqq); |
| 2227 | return bfqq; |
| 2228 | } |
| 2229 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2230 | static void bfq_arm_slice_timer(struct bfq_data *bfqd) |
| 2231 | { |
| 2232 | struct bfq_queue *bfqq = bfqd->in_service_queue; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2233 | u32 sl; |
| 2234 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2235 | bfq_mark_bfqq_wait_request(bfqq); |
| 2236 | |
| 2237 | /* |
| 2238 | * We don't want to idle for seeks, but we do want to allow |
| 2239 | * fair distribution of slice time for a process doing back-to-back |
| 2240 | * seeks. So allow a little bit of time for it to submit a new rq. |
| 2241 | */ |
| 2242 | sl = bfqd->bfq_slice_idle; |
| 2243 | /* |
Arianna Avanzini | 1de0c4c | 2017-04-12 18:23:17 +0200 | [diff] [blame] | 2244 | * Unless the queue is being weight-raised or the scenario is |
| 2245 | * asymmetric, grant only minimum idle time if the queue |
| 2246 | * is seeky. A long idling is preserved for a weight-raised |
| 2247 | * queue, or, more in general, in an asymmetric scenario, |
| 2248 | * because a long idling is needed for guaranteeing to a queue |
| 2249 | * its reserved share of the throughput (in particular, it is |
| 2250 | * needed if the queue has a higher weight than some other |
| 2251 | * queue). |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2252 | */ |
Arianna Avanzini | 1de0c4c | 2017-04-12 18:23:17 +0200 | [diff] [blame] | 2253 | if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && |
| 2254 | bfq_symmetric_scenario(bfqd)) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2255 | sl = min_t(u64, sl, BFQ_MIN_TT); |
| 2256 | |
| 2257 | bfqd->last_idling_start = ktime_get(); |
| 2258 | hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), |
| 2259 | HRTIMER_MODE_REL); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 2260 | bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2261 | } |
| 2262 | |
| 2263 | /* |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2264 | * In autotuning mode, max_budget is dynamically recomputed as the |
| 2265 | * number of sectors transferred in timeout at the estimated peak |
| 2266 | * rate. This enables BFQ to utilize a full timeslice with a full |
| 2267 | * budget, even if the in-service queue is served at peak rate. And |
| 2268 | * this maximises throughput with sequential workloads. |
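|      | * |
|      | * Purely illustrative figures: a peak rate of roughly 100 MB/s |
|      | * is about 200 sectors per millisecond, so with a budget |
|      | * timeout of, say, 125 ms the resulting max_budget is on the |
|      | * order of 25000 sectors (~12 MB). |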
| 2269 | */ |
| 2270 | static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) |
| 2271 | { |
| 2272 | return (u64)bfqd->peak_rate * USEC_PER_MSEC * |
| 2273 | jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; |
| 2274 | } |
| 2275 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2276 | /* |
| 2277 | * Update parameters related to throughput and responsiveness, as a |
| 2278 | * function of the estimated peak rate. See comments on |
| 2279 | * bfq_calc_max_budget(), and on T_slow and T_fast arrays. |
| 2280 | */ |
| 2281 | static void update_thr_responsiveness_params(struct bfq_data *bfqd) |
| 2282 | { |
| 2283 | int dev_type = blk_queue_nonrot(bfqd->queue); |
| 2284 | |
| 2285 | if (bfqd->bfq_user_max_budget == 0) |
| 2286 | bfqd->bfq_max_budget = |
| 2287 | bfq_calc_max_budget(bfqd); |
| 2288 | |
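|      | /* |
|      | * Reclassify the device as SLOW or FAST when the estimated peak |
|      | * rate crosses the per-device-type threshold, and update |
|      | * RT_prod (reference rate times reference time) accordingly; |
|      | * bfq_wr_duration() then divides this product by the current |
|      | * peak rate. |
|      | */ |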
| 2289 | if (bfqd->device_speed == BFQ_BFQD_FAST && |
| 2290 | bfqd->peak_rate < device_speed_thresh[dev_type]) { |
| 2291 | bfqd->device_speed = BFQ_BFQD_SLOW; |
| 2292 | bfqd->RT_prod = R_slow[dev_type] * |
| 2293 | T_slow[dev_type]; |
| 2294 | } else if (bfqd->device_speed == BFQ_BFQD_SLOW && |
| 2295 | bfqd->peak_rate > device_speed_thresh[dev_type]) { |
| 2296 | bfqd->device_speed = BFQ_BFQD_FAST; |
| 2297 | bfqd->RT_prod = R_fast[dev_type] * |
| 2298 | T_fast[dev_type]; |
| 2299 | } |
| 2300 | |
| 2301 | bfq_log(bfqd, |
| 2302 | "dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec", |
| 2303 | dev_type == 0 ? "ROT" : "NONROT", |
| 2304 | bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW", |
| 2305 | bfqd->device_speed == BFQ_BFQD_FAST ? |
| 2306 | (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT : |
| 2307 | (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT, |
| 2308 | (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>> |
| 2309 | BFQ_RATE_SHIFT); |
| 2310 | } |
| 2311 | |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2312 | static void bfq_reset_rate_computation(struct bfq_data *bfqd, |
| 2313 | struct request *rq) |
| 2314 | { |
| 2315 | if (rq != NULL) { /* new rq dispatch now, reset accordingly */ |
| 2316 | bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); |
| 2317 | bfqd->peak_rate_samples = 1; |
| 2318 | bfqd->sequential_samples = 0; |
| 2319 | bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = |
| 2320 | blk_rq_sectors(rq); |
| 2321 | } else /* no new rq dispatched, just reset the number of samples */ |
| 2322 | bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ |
| 2323 | |
| 2324 | bfq_log(bfqd, |
| 2325 | "reset_rate_computation at end, sample %u/%u tot_sects %llu", |
| 2326 | bfqd->peak_rate_samples, bfqd->sequential_samples, |
| 2327 | bfqd->tot_sectors_dispatched); |
| 2328 | } |
| 2329 | |
| 2330 | static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) |
| 2331 | { |
| 2332 | u32 rate, weight, divisor; |
| 2333 | |
| 2334 | /* |
| 2335 | * For the convergence property to hold (see comments on |
| 2336 | * bfq_update_peak_rate()) and for the assessment to be |
| 2337 | * reliable, a minimum number of samples must be present, and |
| 2338 | * a minimum amount of time must have elapsed. If not so, do |
| 2339 | * not compute new rate. Just reset parameters, to get ready |
| 2340 | * for a new evaluation attempt. |
| 2341 | */ |
| 2342 | if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || |
| 2343 | bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) |
| 2344 | goto reset_computation; |
| 2345 | |
| 2346 | /* |
| 2347 | * If a new request completion has occurred after last |
| 2348 | * dispatch, then, to approximate the rate at which requests |
| 2349 | * have been served by the device, it is more precise to |
| 2350 | * extend the observation interval to the last completion. |
| 2351 | */ |
| 2352 | bfqd->delta_from_first = |
| 2353 | max_t(u64, bfqd->delta_from_first, |
| 2354 | bfqd->last_completion - bfqd->first_dispatch); |
| 2355 | |
| 2356 | /* |
| 2357 | * Rate computed in sects/usec, and not sects/nsec, for |
| 2358 | * precision issues. |
| 2359 | */ |
| 2360 | rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, |
| 2361 | div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); |
| 2362 | |
| 2363 | /* |
| 2364 | * Peak rate not updated if: |
| 2365 | * - the percentage of sequential dispatches is below 3/4 of the |
| 2366 | * total, and rate is below the current estimated peak rate |
| 2367 | * - rate is unreasonably high (> 20M sectors/sec) |
| 2368 | */ |
| 2369 | if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && |
| 2370 | rate <= bfqd->peak_rate) || |
| 2371 | rate > 20<<BFQ_RATE_SHIFT) |
| 2372 | goto reset_computation; |
| 2373 | |
| 2374 | /* |
| 2375 | * We have to update the peak rate, at last! To this purpose, |
| 2376 | * we use a low-pass filter. We compute the smoothing constant |
| 2377 | * of the filter as a function of the 'weight' of the new |
| 2378 | * measured rate. |
| 2379 | * |
| 2380 | * As can be seen in the next formulas, we define this weight as a
| 2381 | * quantity proportional to how sequential the workload is, |
| 2382 | * and to how long the observation time interval is. |
| 2383 | * |
| 2384 | * The weight runs from 0 to 8. The maximum value of the |
| 2385 | * weight, 8, yields the minimum value for the smoothing |
| 2386 | * constant. At this minimum value for the smoothing constant, |
| 2387 | * the measured rate contributes for half of the next value of |
| 2388 | * the estimated peak rate. |
| 2389 | * |
| 2390 | * So, the first step is to compute the weight as a function |
| 2391 | * of how sequential the workload is. Note that the weight |
| 2392 | * cannot reach 9, because bfqd->sequential_samples cannot |
| 2393 | * become equal to bfqd->peak_rate_samples, which, in its |
| 2394 | * turn, holds true because bfqd->sequential_samples is not |
| 2395 | * incremented for the first sample. |
| 2396 | */ |
| 2397 | weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; |
| 2398 | |
| 2399 | /* |
| 2400 | * Second step: further refine the weight as a function of the |
| 2401 | * duration of the observation interval. |
| 2402 | */ |
| 2403 | weight = min_t(u32, 8, |
| 2404 | div_u64(weight * bfqd->delta_from_first, |
| 2405 | BFQ_RATE_REF_INTERVAL)); |
| 2406 | |
| 2407 | /* |
| 2408 | * Divisor ranging from 10, for minimum weight, to 2, for |
| 2409 | * maximum weight. |
| 2410 | */ |
| 2411 | divisor = 10 - weight; |
| 2412 | |
| 2413 | /* |
| 2414 | * Finally, update peak rate: |
| 2415 | * |
| 2416 | * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor |
| 2417 | */ |
| 2418 | bfqd->peak_rate *= divisor-1; |
| 2419 | bfqd->peak_rate /= divisor; |
| 2420 | rate /= divisor; /* smoothing constant alpha = 1/divisor */ |
| 2421 | |
| 2422 | bfqd->peak_rate += rate; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2423 | update_thr_responsiveness_params(bfqd); |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2424 | |
| 2425 | reset_computation: |
| 2426 | bfq_reset_rate_computation(bfqd, rq); |
| 2427 | } |
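/*
 * Illustrative sketch, not part of the scheduler: the low-pass filter
 * above, run on assumed numbers. With 48 sequential samples out of 64,
 * an observation interval equal to 0.8 reference intervals, an old
 * peak rate of 1000 and a new measured rate of 1400 (both in the same
 * fixed-point unit as above), the update proceeds as follows.
 */
static unsigned int bfq_peak_rate_filter_sketch(void)
{
	unsigned int sequential_samples = 48, peak_rate_samples = 64;
	unsigned int peak_rate = 1000, rate = 1400;
	unsigned int weight, divisor;

	/* First step, weight from sequentiality: 9 * 48 / 64 = 6. */
	weight = (9 * sequential_samples) / peak_rate_samples;

	/* Second step, scale by interval length (0.8): 6 * 8 / 10 = 4. */
	weight = weight * 8 / 10;
	if (weight > 8)
		weight = 8;

	/* Divisor from 10 (minimum weight) to 2 (maximum weight): here 6. */
	divisor = 10 - weight;

	/* 1000 * 5 / 6 + 1400 / 6 = 833 + 233 = 1066. */
	return peak_rate * (divisor - 1) / divisor + rate / divisor;
}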
| 2428 | |
| 2429 | /* |
| 2430 | * Update the read/write peak rate (the main quantity used for |
| 2431 | * auto-tuning, see update_thr_responsiveness_params()). |
| 2432 | * |
| 2433 | * It is not trivial to estimate the peak rate (correctly): because of |
| 2434 | * the presence of sw and hw queues between the scheduler and the |
| 2435 | * device components that finally serve I/O requests, it is hard to |
| 2436 | * say exactly when a given dispatched request is served inside the |
| 2437 | * device, and for how long. As a consequence, it is hard to know |
| 2438 | * precisely at what rate a given set of requests is actually served |
| 2439 | * by the device. |
| 2440 | * |
| 2441 | * On the opposite end, the dispatch time of any request is trivially |
| 2442 | * available, and, from this piece of information, the "dispatch rate" |
| 2443 | * of requests can be immediately computed. So, the idea in the next |
| 2444 | * function is to use what is known, namely request dispatch times |
| 2445 | * (plus, when useful, request completion times), to estimate what is |
| 2446 | * unknown, namely in-device request service rate. |
| 2447 | * |
| 2448 | * The main issue is that, because of the above facts, the rate at |
| 2449 | * which a certain set of requests is dispatched over a certain time |
| 2450 | * interval can vary greatly with respect to the rate at which the |
| 2451 | * same requests are then served. But, since the size of any |
| 2452 | * intermediate queue is limited, and the service scheme is lossless |
| 2453 | * (no request is silently dropped), the following obvious convergence |
| 2454 | * property holds: the number of requests dispatched MUST become |
| 2455 | * closer and closer to the number of requests completed as the |
| 2456 | * observation interval grows. This is the key property used in |
| 2457 | * the next function to estimate the peak service rate as a function |
| 2458 | * of the observed dispatch rate. The function assumes it is invoked
| 2459 | * on every request dispatch. |
| 2460 | */ |
| 2461 | static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) |
| 2462 | { |
| 2463 | u64 now_ns = ktime_get_ns(); |
| 2464 | |
| 2465 | if (bfqd->peak_rate_samples == 0) { /* first dispatch */ |
| 2466 | bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", |
| 2467 | bfqd->peak_rate_samples); |
| 2468 | bfq_reset_rate_computation(bfqd, rq); |
| 2469 | goto update_last_values; /* will add one sample */ |
| 2470 | } |
| 2471 | |
| 2472 | /* |
| 2473 | * Device idle for very long: the observation interval lasting |
| 2474 | * up to this dispatch cannot be a valid observation interval |
| 2475 | * for computing a new peak rate (similarly to the late- |
| 2476 | * completion event in bfq_completed_request()). Go to |
| 2477 | * update_rate_and_reset to have the following three steps |
| 2478 | * taken: |
| 2479 | * - close the observation interval at the last (previous) |
| 2480 | * request dispatch or completion |
| 2481 | * - compute rate, if possible, for that observation interval |
| 2482 | * - start a new observation interval with this dispatch |
| 2483 | */ |
| 2484 | if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && |
| 2485 | bfqd->rq_in_driver == 0) |
| 2486 | goto update_rate_and_reset; |
| 2487 | |
| 2488 | /* Update sampling information */ |
| 2489 | bfqd->peak_rate_samples++; |
| 2490 | |
| 2491 | if ((bfqd->rq_in_driver > 0 || |
| 2492 | now_ns - bfqd->last_completion < BFQ_MIN_TT) |
| 2493 | && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) |
| 2494 | bfqd->sequential_samples++; |
| 2495 | |
| 2496 | bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); |
| 2497 | |
| 2498 | /* Reset max observed rq size every 32 dispatches */ |
| 2499 | if (likely(bfqd->peak_rate_samples % 32)) |
| 2500 | bfqd->last_rq_max_size = |
| 2501 | max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); |
| 2502 | else |
| 2503 | bfqd->last_rq_max_size = blk_rq_sectors(rq); |
| 2504 | |
| 2505 | bfqd->delta_from_first = now_ns - bfqd->first_dispatch; |
| 2506 | |
| 2507 | /* Target observation interval not yet reached, go on sampling */ |
| 2508 | if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) |
| 2509 | goto update_last_values; |
| 2510 | |
| 2511 | update_rate_and_reset: |
| 2512 | bfq_update_rate_reset(bfqd, rq); |
| 2513 | update_last_values: |
| 2514 | bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); |
| 2515 | bfqd->last_dispatch = now_ns; |
| 2516 | } |
| 2517 | |
| 2518 | /* |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2519 | * Remove request from internal lists. |
| 2520 | */ |
| 2521 | static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) |
| 2522 | { |
| 2523 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
| 2524 | |
| 2525 | /* |
| 2526 | * For consistency, the next instruction should have been |
| 2527 | * executed after removing the request from the queue and |
| 2528 | * dispatching it. We execute instead this instruction before |
| 2529 | * bfq_remove_request() (and hence introduce a temporary |
| 2530 | * inconsistency), for efficiency. In fact, should this |
| 2531 | * dispatch occur for a non in-service bfqq, this anticipated |
| 2532 | * increment prevents two counters related to bfqq->dispatched |
| 2533 | * from being, first, uselessly decremented, and then
| 2534 | * incremented again when the (new) value of bfqq->dispatched |
| 2535 | * happens to be taken into account. |
| 2536 | */ |
| 2537 | bfqq->dispatched++; |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2538 | bfq_update_peak_rate(q->elevator->elevator_data, rq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2539 | |
| 2540 | bfq_remove_request(q, rq); |
| 2541 | } |
| 2542 | |
| 2543 | static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
| 2544 | { |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2545 | /* |
| 2546 | * If this bfqq is shared between multiple processes, check |
| 2547 | * to make sure that those processes are still issuing I/Os |
| 2548 | * within the mean seek distance. If not, it may be time to |
| 2549 | * break the queues apart again. |
| 2550 | */ |
| 2551 | if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) |
| 2552 | bfq_mark_bfqq_split_coop(bfqq); |
| 2553 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2554 | if (RB_EMPTY_ROOT(&bfqq->sort_list)) { |
| 2555 | if (bfqq->dispatched == 0) |
| 2556 | /* |
| 2557 | * Overloading budget_timeout field to store |
| 2558 | * the time at which the queue remains with no |
| 2559 | * backlog and no outstanding request; used by |
| 2560 | * the weight-raising mechanism. |
| 2561 | */ |
| 2562 | bfqq->budget_timeout = jiffies; |
| 2563 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 2564 | bfq_del_bfqq_busy(bfqd, bfqq, true); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2565 | } else { |
Paolo Valente | 80294c3 | 2017-08-31 08:46:29 +0200 | [diff] [blame] | 2566 | bfq_requeue_bfqq(bfqd, bfqq, true); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 2567 | /* |
| 2568 | * Resort priority tree of potential close cooperators. |
| 2569 | */ |
| 2570 | bfq_pos_tree_add_move(bfqd, bfqq); |
| 2571 | } |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 2572 | |
| 2573 | /* |
| 2574 | * All in-service entities must have been properly deactivated |
| 2575 | * or requeued before executing the next function, which |
| 2576 | * resets all in-service entities as no more in service.
| 2577 | */ |
| 2578 | __bfq_bfqd_reset_in_service(bfqd); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2579 | } |
| 2580 | |
| 2581 | /** |
| 2582 | * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. |
| 2583 | * @bfqd: device data. |
| 2584 | * @bfqq: queue to update. |
| 2585 | * @reason: reason for expiration. |
| 2586 | * |
| 2587 | * Handle the feedback on @bfqq budget at queue expiration. |
| 2588 | * See the body for detailed comments. |
| 2589 | */ |
| 2590 | static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, |
| 2591 | struct bfq_queue *bfqq, |
| 2592 | enum bfqq_expiration reason) |
| 2593 | { |
| 2594 | struct request *next_rq; |
| 2595 | int budget, min_budget; |
| 2596 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2597 | min_budget = bfq_min_budget(bfqd); |
| 2598 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2599 | if (bfqq->wr_coeff == 1) |
| 2600 | budget = bfqq->max_budget; |
| 2601 | else /* |
| 2602 | * Use a constant, low budget for weight-raised queues, |
| 2603 | * to help achieve a low latency. Keep it slightly higher |
| 2604 | * than the minimum possible budget, to cause slightly
| 2605 | * fewer expirations.
| 2606 | */ |
| 2607 | budget = 2 * min_budget; |
| 2608 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2609 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", |
| 2610 | bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); |
| 2611 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", |
| 2612 | budget, bfq_min_budget(bfqd)); |
| 2613 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", |
| 2614 | bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); |
| 2615 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2616 | if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2617 | switch (reason) { |
| 2618 | /* |
| 2619 | * Caveat: in all the following cases we trade latency |
| 2620 | * for throughput. |
| 2621 | */ |
| 2622 | case BFQQE_TOO_IDLE: |
Paolo Valente | 54b6045 | 2017-04-12 18:23:09 +0200 | [diff] [blame] | 2623 | /* |
| 2624 | * This is the only case where we may reduce |
| 2625 | * the budget: if there is no request of the |
| 2626 | * process still waiting for completion, then |
| 2627 | * we assume (tentatively) that the timer has |
| 2628 | * expired because the batch of requests of |
| 2629 | * the process could have been served with a |
| 2630 | * smaller budget. Hence, betting that |
| 2631 | * the process will behave in the same way when it
| 2632 | * becomes backlogged again, we reduce its |
| 2633 | * next budget. As long as we guess right, |
| 2634 | * this budget cut reduces the latency |
| 2635 | * experienced by the process. |
| 2636 | * |
| 2637 | * However, if there are still outstanding |
| 2638 | * requests, then the process may have not yet |
| 2639 | * issued its next request just because it is |
| 2640 | * still waiting for the completion of some of |
| 2641 | * the still outstanding ones. So in this |
| 2642 | * subcase we do not reduce its budget, on the |
| 2643 | * contrary we increase it to possibly boost |
| 2644 | * the throughput, as discussed in the |
| 2645 | * comments to the BUDGET_TIMEOUT case. |
| 2646 | */ |
| 2647 | if (bfqq->dispatched > 0) /* still outstanding reqs */ |
| 2648 | budget = min(budget * 2, bfqd->bfq_max_budget); |
| 2649 | else { |
| 2650 | if (budget > 5 * min_budget) |
| 2651 | budget -= 4 * min_budget; |
| 2652 | else |
| 2653 | budget = min_budget; |
| 2654 | } |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2655 | break; |
| 2656 | case BFQQE_BUDGET_TIMEOUT: |
Paolo Valente | 54b6045 | 2017-04-12 18:23:09 +0200 | [diff] [blame] | 2657 | /* |
| 2658 | * We double the budget here because it gives |
| 2659 | * the chance to boost the throughput if this |
| 2660 | * is not a seeky process (and has bumped into |
| 2661 | * this timeout because of, e.g., ZBR). |
| 2662 | */ |
| 2663 | budget = min(budget * 2, bfqd->bfq_max_budget); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2664 | break; |
| 2665 | case BFQQE_BUDGET_EXHAUSTED: |
| 2666 | /* |
| 2667 | * The process still has backlog, and did not |
| 2668 | * let either the budget timeout or the disk |
| 2669 | * idling timeout expire. Hence it is not |
| 2670 | * seeky, has a short thinktime and may be |
| 2671 | * happy with a higher budget too. So |
| 2672 | * definitely increase the budget of this good |
| 2673 | * candidate to boost the disk throughput. |
| 2674 | */ |
Paolo Valente | 54b6045 | 2017-04-12 18:23:09 +0200 | [diff] [blame] | 2675 | budget = min(budget * 4, bfqd->bfq_max_budget); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2676 | break; |
| 2677 | case BFQQE_NO_MORE_REQUESTS: |
| 2678 | /* |
| 2679 | * For queues that expire for this reason, it |
| 2680 | * is particularly important to keep the |
| 2681 | * budget close to the actual service they |
| 2682 | * need. Doing so reduces the timestamp |
| 2683 | * misalignment problem described in the |
| 2684 | * comments in the body of |
| 2685 | * __bfq_activate_entity. In fact, suppose |
| 2686 | * that a queue systematically expires for |
| 2687 | * BFQQE_NO_MORE_REQUESTS and presents a |
| 2688 | * new request in time to enjoy timestamp |
| 2689 | * back-shifting. The larger the budget of the |
| 2690 | * queue is with respect to the service the |
| 2691 | * queue actually requests in each service |
| 2692 | * slot, the more times the queue can be |
| 2693 | * reactivated with the same virtual finish |
| 2694 | * time. It follows that, even if this finish |
| 2695 | * time is pushed to the system virtual time |
| 2696 | * to reduce the consequent timestamp |
| 2697 | * misalignment, the queue unjustly enjoys for |
| 2698 | * many re-activations a lower finish time |
| 2699 | * than all newly activated queues. |
| 2700 | * |
| 2701 | * The service needed by bfqq is measured |
| 2702 | * quite precisely by bfqq->entity.service. |
| 2703 | * Since bfqq does not enjoy device idling, |
| 2704 | * bfqq->entity.service is equal to the number |
| 2705 | * of sectors that the process associated with |
| 2706 | * bfqq requested to read/write before waiting |
| 2707 | * for request completions, or blocking for |
| 2708 | * other reasons. |
| 2709 | */ |
| 2710 | budget = max_t(int, bfqq->entity.service, min_budget); |
| 2711 | break; |
| 2712 | default: |
| 2713 | return; |
| 2714 | } |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2715 | } else if (!bfq_bfqq_sync(bfqq)) { |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2716 | /* |
| 2717 | * Async queues always get the maximum possible
| 2718 | * budget, as for them we do not care about latency |
| 2719 | * (in addition, their ability to dispatch is limited |
| 2720 | * by the charging factor). |
| 2721 | */ |
| 2722 | budget = bfqd->bfq_max_budget; |
| 2723 | } |
| 2724 | |
| 2725 | bfqq->max_budget = budget; |
| 2726 | |
| 2727 | if (bfqd->budgets_assigned >= bfq_stats_min_budgets && |
| 2728 | !bfqd->bfq_user_max_budget) |
| 2729 | bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); |
| 2730 | |
| 2731 | /* |
| 2732 | * If there is still backlog, then assign a new budget, making |
| 2733 | * sure that it is large enough for the next request. Since |
| 2734 | * the finish time of bfqq must be kept in sync with the |
| 2735 | * budget, be sure to call __bfq_bfqq_expire() *after* this |
| 2736 | * update. |
| 2737 | * |
| 2738 | * If there is no backlog, then no need to update the budget; |
| 2739 | * it will be updated on the arrival of a new request. |
| 2740 | */ |
| 2741 | next_rq = bfqq->next_rq; |
| 2742 | if (next_rq) |
| 2743 | bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, |
| 2744 | bfq_serv_to_charge(next_rq, bfqq)); |
| 2745 | |
| 2746 | bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", |
| 2747 | next_rq ? blk_rq_sectors(next_rq) : 0, |
| 2748 | bfqq->entity.budget); |
| 2749 | } |
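/*
 * Illustrative sketch, not part of the scheduler: the budget feedback
 * applied above for a sync, non-weight-raised queue, on assumed
 * numbers (current budget 4096 sectors, min_budget 512, device-wide
 * maximum 16384). It only shows the shape of the per-reason rules.
 */
static void bfq_budget_feedback_sketch(void)
{
	int budget = 4096, min_budget = 512, max_budget = 16384;
	int too_idle, timeout, exhausted;

	/* TOO_IDLE with no outstanding requests: cut the budget. */
	too_idle = budget > 5 * min_budget ?
		budget - 4 * min_budget : min_budget;	/* 4096 - 2048 = 2048 */

	/* BUDGET_TIMEOUT: double, capped at the device-wide maximum. */
	timeout = budget * 2 < max_budget ? budget * 2 : max_budget;	/* 8192 */

	/* BUDGET_EXHAUSTED: quadruple, again capped. */
	exhausted = budget * 4 < max_budget ? budget * 4 : max_budget;	/* 16384 */

	(void)too_idle; (void)timeout; (void)exhausted;
}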
| 2750 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2751 | /* |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2752 | * Return true if the process associated with bfqq is "slow". The slow |
| 2753 | * flag is used, in addition to the budget timeout, to reduce the |
| 2754 | * amount of service provided to seeky processes, and thus reduce |
| 2755 | * their chances to lower the throughput. More details in the comments |
| 2756 | * on the function bfq_bfqq_expire(). |
| 2757 | * |
| 2758 | * An important observation is in order: as discussed in the comments |
| 2759 | * on the function bfq_update_peak_rate(), with devices with internal |
| 2760 | * queues, it is hard, if possible at all, to know when and for how long
| 2761 | * an I/O request is processed by the device (apart from the trivial |
| 2762 | * I/O pattern where a new request is dispatched only after the |
| 2763 | * previous one has been completed). This makes it hard to evaluate |
| 2764 | * the real rate at which the I/O requests of each bfq_queue are |
| 2765 | * served. In fact, for an I/O scheduler like BFQ, serving a |
| 2766 | * bfq_queue means just dispatching its requests during its service |
| 2767 | * slot (i.e., until the budget of the queue is exhausted, or the |
| 2768 | * queue remains idle, or, finally, a timeout fires). But, during the |
| 2769 | * service slot of a bfq_queue, around 100 ms at most, the device may |
| 2770 | * be even still processing requests of bfq_queues served in previous |
| 2771 | * service slots. On the opposite end, the requests of the in-service |
| 2772 | * bfq_queue may be completed after the service slot of the queue |
| 2773 | * finishes. |
| 2774 | * |
| 2775 | * Anyway, unless more sophisticated solutions are used |
| 2776 | * (where possible), the sum of the sizes of the requests dispatched |
| 2777 | * during the service slot of a bfq_queue is probably the only |
| 2778 | * approximation available for the service received by the bfq_queue |
| 2779 | * during its service slot. And this sum is the quantity used in this |
| 2780 | * function to evaluate the I/O speed of a process. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2781 | */ |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2782 | static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 2783 | bool compensate, enum bfqq_expiration reason, |
| 2784 | unsigned long *delta_ms) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2785 | { |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2786 | ktime_t delta_ktime; |
| 2787 | u32 delta_usecs; |
| 2788 | bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2789 | |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2790 | if (!bfq_bfqq_sync(bfqq)) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2791 | return false; |
| 2792 | |
| 2793 | if (compensate) |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2794 | delta_ktime = bfqd->last_idling_start; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2795 | else |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2796 | delta_ktime = ktime_get(); |
| 2797 | delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); |
| 2798 | delta_usecs = ktime_to_us(delta_ktime); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2799 | |
| 2800 | /* don't use too short time intervals */ |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2801 | if (delta_usecs < 1000) { |
| 2802 | if (blk_queue_nonrot(bfqd->queue)) |
| 2803 | /* |
| 2804 | * give same worst-case guarantees as idling |
| 2805 | * for seeky |
| 2806 | */ |
| 2807 | *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; |
| 2808 | else /* charge at least one seek */ |
| 2809 | *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2810 | |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2811 | return slow; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2812 | } |
| 2813 | |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2814 | *delta_ms = delta_usecs / USEC_PER_MSEC; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2815 | |
| 2816 | /* |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2817 | * Use only long (> 20ms) intervals to filter out excessive |
| 2818 | * spikes in service rate estimation. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2819 | */ |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2820 | if (delta_usecs > 20000) { |
| 2821 | /* |
| 2822 | * Caveat for rotational devices: processes doing I/O |
| 2823 | * in the slower disk zones tend to be slow(er) even |
| 2824 | * if not seeky. In this respect, the estimated peak |
| 2825 | * rate is likely to be an average over the disk |
| 2826 | * surface. Accordingly, to not be too harsh with |
| 2827 | * unlucky processes, a process is deemed slow only if |
| 2828 | * its rate has been lower than half of the estimated |
| 2829 | * peak rate. |
| 2830 | */ |
| 2831 | slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; |
| 2832 | } |
| 2833 | |
| 2834 | bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); |
| 2835 | |
| 2836 | return slow; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2837 | } |
| 2838 | |
| 2839 | /* |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 2840 | * To be deemed as soft real-time, an application must meet two |
| 2841 | * requirements. First, the application must not require an average |
| 2842 | * bandwidth higher than the approximate bandwidth required to play back or
| 2843 | * record a compressed high-definition video. |
| 2844 | * The next function is invoked on the completion of the last request of a |
| 2845 | * batch, to compute the next-start time instant, soft_rt_next_start, such |
| 2846 | * that, if the next request of the application does not arrive before |
| 2847 | * soft_rt_next_start, then the above requirement on the bandwidth is met. |
| 2848 | * |
| 2849 | * The second requirement is that the request pattern of the application is |
| 2850 | * isochronous, i.e., that, after issuing a request or a batch of requests, |
| 2851 | * the application stops issuing new requests until all its pending requests |
| 2852 | * have been completed. After that, the application may issue a new batch, |
| 2853 | * and so on. |
| 2854 | * For this reason the next function is invoked to compute |
| 2855 | * soft_rt_next_start only for applications that meet this requirement, |
| 2856 | * whereas soft_rt_next_start is set to infinity for applications that do |
| 2857 | * not. |
| 2858 | * |
| 2859 | * Unfortunately, even a greedy application may happen to behave in an |
| 2860 | * isochronous way if the CPU load is high. In fact, the application may |
| 2861 | * stop issuing requests while the CPUs are busy serving other processes, |
| 2862 | * then restart, then stop again for a while, and so on. In addition, if |
| 2863 | * the disk achieves a low enough throughput with the request pattern |
| 2864 | * issued by the application (e.g., because the request pattern is random |
| 2865 | * and/or the device is slow), then the application may meet the above |
| 2866 | * bandwidth requirement too. To prevent such a greedy application from being
| 2867 | * deemed soft real-time, a further rule is used in the computation of
| 2868 | * soft_rt_next_start: soft_rt_next_start must be higher than the current |
| 2869 | * time plus the maximum time for which the arrival of a request is waited |
| 2870 | * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle. |
| 2871 | * This filters out greedy applications, as the latter issue instead their |
| 2872 | * next request as soon as possible after the last one has been completed |
| 2873 | * (in contrast, when a batch of requests is completed, a soft real-time |
| 2874 | * application spends some time processing data). |
| 2875 | * |
| 2876 | * Unfortunately, the last filter may easily generate false positives if |
| 2877 | * only bfqd->bfq_slice_idle is used as a reference time interval and one |
| 2878 | * or both the following cases occur: |
| 2879 | * 1) HZ is so low that the duration of a jiffy is comparable to or higher |
| 2880 | * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with |
| 2881 | * HZ=100. |
| 2882 | * 2) jiffies, instead of increasing at a constant rate, may stop increasing |
| 2883 | * for a while, then suddenly 'jump' by several units to recover the lost |
| 2884 | * increments. This seems to happen, e.g., inside virtual machines. |
| 2885 | * To address this issue, we do not use as a reference time interval just |
| 2886 | * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In |
| 2887 | * particular we add the minimum number of jiffies for which the filter |
| 2888 | * seems to be quite precise also in embedded systems and KVM/QEMU virtual |
| 2889 | * machines. |
| 2890 | */ |
| 2891 | static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, |
| 2892 | struct bfq_queue *bfqq) |
| 2893 | { |
| 2894 | return max(bfqq->last_idle_bklogged + |
| 2895 | HZ * bfqq->service_from_backlogged / |
| 2896 | bfqd->bfq_wr_max_softrt_rate, |
| 2897 | jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); |
| 2898 | } |
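/*
 * Illustrative sketch, not part of the scheduler: the two candidate
 * next-start instants computed above, on assumed numbers (HZ = 250,
 * current jiffies = 100000, slice_idle ~ 8 ms = 2 jiffies, 7000
 * sectors served since the queue was last backlogged, maximum soft
 * real-time rate 7000 sectors/sec).
 */
static unsigned long bfq_softrt_next_start_sketch(void)
{
	unsigned long hz = 250, jiffies_now = 100000;
	unsigned long last_idle_bklogged = 99900;
	unsigned long service_from_backlogged = 7000;	/* sectors */
	unsigned long max_softrt_rate = 7000;		/* sectors/sec */
	unsigned long slice_idle_jiffies = 2;
	unsigned long bw_bound, greedy_bound;

	/* Bandwidth bound: 99900 + 250 * 7000 / 7000 = 100150. */
	bw_bound = last_idle_bklogged +
		hz * service_from_backlogged / max_softrt_rate;

	/* Greedy-application filter: 100000 + 2 + 4 = 100006. */
	greedy_bound = jiffies_now + slice_idle_jiffies + 4;

	/* The bandwidth bound dominates: next start at jiffy 100150. */
	return bw_bound > greedy_bound ? bw_bound : greedy_bound;
}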
| 2899 | |
| 2900 | /* |
| 2901 | * Return the farthest future time instant according to jiffies |
| 2902 | * macros. |
| 2903 | */ |
| 2904 | static unsigned long bfq_greatest_from_now(void) |
| 2905 | { |
| 2906 | return jiffies + MAX_JIFFY_OFFSET; |
| 2907 | } |
| 2908 | |
| 2909 | /* |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2910 | * Return the farthest past time instant according to jiffies |
| 2911 | * macros. |
| 2912 | */ |
| 2913 | static unsigned long bfq_smallest_from_now(void) |
| 2914 | { |
| 2915 | return jiffies - MAX_JIFFY_OFFSET; |
| 2916 | } |
| 2917 | |
| 2918 | /** |
| 2919 | * bfq_bfqq_expire - expire a queue. |
| 2920 | * @bfqd: device owning the queue. |
| 2921 | * @bfqq: the queue to expire. |
| 2922 | * @compensate: if true, compensate for the time spent idling. |
| 2923 | * @reason: the reason causing the expiration. |
| 2924 | * |
Paolo Valente | c074170e | 2017-04-12 18:23:11 +0200 | [diff] [blame] | 2925 | * If the process associated with bfqq does slow I/O (e.g., because it |
| 2926 | * issues random requests), we charge bfqq with the time it has been |
| 2927 | * in service instead of the service it has received (see |
| 2928 | * bfq_bfqq_charge_time for details on how this goal is achieved). As |
| 2929 | * a consequence, bfqq will typically get higher timestamps upon |
| 2930 | * reactivation, and hence it will be rescheduled as if it had |
| 2931 | * received more service than what it has actually received. In the |
| 2932 | * end, bfqq receives less service in proportion to how slowly its |
| 2933 | * associated process consumes its budgets (and hence how seriously it |
| 2934 | * tends to lower the throughput). In addition, this time-charging |
| 2935 | * strategy guarantees time fairness among slow processes. In |
| 2936 | * contrast, if the process associated with bfqq is not slow, we |
| 2937 | * charge bfqq exactly with the service it has received. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2938 | * |
Paolo Valente | c074170e | 2017-04-12 18:23:11 +0200 | [diff] [blame] | 2939 | * Charging time to the first type of queues and the exact service to |
| 2940 | * the other has the effect of using the WF2Q+ policy to schedule the |
| 2941 | * former on a timeslice basis, without violating service domain |
| 2942 | * guarantees among the latter. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2943 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 2944 | void bfq_bfqq_expire(struct bfq_data *bfqd, |
| 2945 | struct bfq_queue *bfqq, |
| 2946 | bool compensate, |
| 2947 | enum bfqq_expiration reason) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2948 | { |
| 2949 | bool slow; |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2950 | unsigned long delta = 0; |
| 2951 | struct bfq_entity *entity = &bfqq->entity; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2952 | int ref; |
| 2953 | |
| 2954 | /* |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2955 | * Check whether the process is slow (see bfq_bfqq_is_slow). |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2956 | */ |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2957 | slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2958 | |
| 2959 | /* |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 2960 | * Increase service_from_backlogged before next statement, |
| 2961 | * because the possible next invocation of |
| 2962 | * bfq_bfqq_charge_time would likely inflate |
| 2963 | * entity->service. In contrast, service_from_backlogged must |
| 2964 | * contain real service, to enable the soft real-time |
| 2965 | * heuristic to correctly compute the bandwidth consumed by |
| 2966 | * bfqq. |
| 2967 | */ |
| 2968 | bfqq->service_from_backlogged += entity->service; |
| 2969 | |
| 2970 | /* |
Paolo Valente | c074170e | 2017-04-12 18:23:11 +0200 | [diff] [blame] | 2971 | * As above explained, charge slow (typically seeky) and |
| 2972 | * timed-out queues with the time and not the service |
| 2973 | * received, to favor sequential workloads. |
| 2974 | * |
| 2975 | * Processes doing I/O in the slower disk zones will tend to |
| 2976 | * be slow(er) even if not seeky. Therefore, since the |
| 2977 | * estimated peak rate is actually an average over the disk |
| 2978 | * surface, these processes may timeout just for bad luck. To |
| 2979 | * avoid punishing them, do not charge time to processes that |
| 2980 | * succeeded in consuming at least 2/3 of their budget. This |
| 2981 | * allows BFQ to preserve enough elasticity to still perform |
| 2982 | * bandwidth, and not time, distribution with little unlucky |
| 2983 | * or quasi-sequential processes. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2984 | */ |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2985 | if (bfqq->wr_coeff == 1 && |
| 2986 | (slow || |
| 2987 | (reason == BFQQE_BUDGET_TIMEOUT && |
| 2988 | bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) |
Paolo Valente | c074170e | 2017-04-12 18:23:11 +0200 | [diff] [blame] | 2989 | bfq_bfqq_charge_time(bfqd, bfqq, delta); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2990 | |
| 2991 | if (reason == BFQQE_TOO_IDLE && |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 2992 | entity->service <= 2 * entity->budget / 10) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 2993 | bfq_clear_bfqq_IO_bound(bfqq); |
| 2994 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 2995 | if (bfqd->low_latency && bfqq->wr_coeff == 1) |
| 2996 | bfqq->last_wr_start_finish = jiffies; |
| 2997 | |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 2998 | if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && |
| 2999 | RB_EMPTY_ROOT(&bfqq->sort_list)) { |
| 3000 | /* |
| 3001 | * If we get here, and there are no outstanding |
| 3002 | * requests, then the request pattern is isochronous |
| 3003 | * (see the comments on the function |
| 3004 | * bfq_bfqq_softrt_next_start()). Thus we can compute |
| 3005 | * soft_rt_next_start. If, instead, the queue still |
| 3006 | * has outstanding requests, then we have to wait for |
| 3007 | * the completion of all the outstanding requests to |
| 3008 | * discover whether the request pattern is actually |
| 3009 | * isochronous. |
| 3010 | */ |
| 3011 | if (bfqq->dispatched == 0) |
| 3012 | bfqq->soft_rt_next_start = |
| 3013 | bfq_bfqq_softrt_next_start(bfqd, bfqq); |
| 3014 | else { |
| 3015 | /* |
| 3016 | * The application is still waiting for the |
| 3017 | * completion of one or more requests: |
| 3018 | * prevent it from possibly being incorrectly |
| 3019 | * deemed as soft real-time by setting its |
| 3020 | * soft_rt_next_start to infinity. In fact, |
| 3021 | * without this assignment, the application |
| 3022 | * would be incorrectly deemed as soft |
| 3023 | * real-time if: |
| 3024 | * 1) it issued a new request before the |
| 3025 | * completion of all its in-flight |
| 3026 | * requests, and |
| 3027 | * 2) at that time, its soft_rt_next_start |
| 3028 | * happened to be in the past. |
| 3029 | */ |
| 3030 | bfqq->soft_rt_next_start = |
| 3031 | bfq_greatest_from_now(); |
| 3032 | /* |
| 3033 | * Schedule an update of soft_rt_next_start to when |
| 3034 | * the task may be discovered to be isochronous. |
| 3035 | */ |
| 3036 | bfq_mark_bfqq_softrt_update(bfqq); |
| 3037 | } |
| 3038 | } |
| 3039 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3040 | bfq_log_bfqq(bfqd, bfqq, |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3041 | "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason, |
| 3042 | slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3043 | |
| 3044 | /* |
| 3045 | * Increase, decrease or leave budget unchanged according to |
| 3046 | * reason. |
| 3047 | */ |
| 3048 | __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); |
| 3049 | ref = bfqq->ref; |
| 3050 | __bfq_bfqq_expire(bfqd, bfqq); |
| 3051 | |
| 3052 | /* mark bfqq as waiting a request only if a bic still points to it */ |
| 3053 | if (ref > 1 && !bfq_bfqq_busy(bfqq) && |
| 3054 | reason != BFQQE_BUDGET_TIMEOUT && |
| 3055 | reason != BFQQE_BUDGET_EXHAUSTED) |
| 3056 | bfq_mark_bfqq_non_blocking_wait_rq(bfqq); |
| 3057 | } |
| 3058 | |
| 3059 | /* |
| 3060 | * Budget timeout is not implemented through a dedicated timer, but |
| 3061 | * just checked on request arrivals and completions, as well as on |
| 3062 | * idle timer expirations. |
| 3063 | */ |
| 3064 | static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) |
| 3065 | { |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3066 | return time_is_before_eq_jiffies(bfqq->budget_timeout); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3067 | } |
| 3068 | |
| 3069 | /* |
| 3070 | * If we expire a queue that is actively waiting (i.e., with the |
| 3071 | * device idled) for the arrival of a new request, then we may incur |
| 3072 | * the timestamp misalignment problem described in the body of the |
| 3073 | * function __bfq_activate_entity. Hence we return true only if this |
| 3074 | * condition does not hold, or if the queue is slow enough to deserve |
| 3075 | * only to be kicked off for preserving a high throughput. |
| 3076 | */ |
| 3077 | static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) |
| 3078 | { |
| 3079 | bfq_log_bfqq(bfqq->bfqd, bfqq, |
| 3080 | "may_budget_timeout: wait_request %d left %d timeout %d", |
| 3081 | bfq_bfqq_wait_request(bfqq), |
| 3082 | bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, |
| 3083 | bfq_bfqq_budget_timeout(bfqq)); |
| 3084 | |
| 3085 | return (!bfq_bfqq_wait_request(bfqq) || |
| 3086 | bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) |
| 3087 | && |
| 3088 | bfq_bfqq_budget_timeout(bfqq); |
| 3089 | } |
| 3090 | |
| 3091 | /* |
| 3092 | * For a queue that becomes empty, device idling is allowed only if |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3093 | * this function returns true for the queue. As a consequence, since |
| 3094 | * device idling plays a critical role in both throughput boosting and |
| 3095 | * service guarantees, the return value of this function plays a |
| 3096 | * critical role in both these aspects as well. |
| 3097 | * |
| 3098 | * In a nutshell, this function returns true only if idling is |
| 3099 | * beneficial for throughput or, even if detrimental for throughput, |
| 3100 | * idling is however necessary to preserve service guarantees (low |
| 3101 | * latency, desired throughput distribution, ...). In particular, on |
| 3102 | * NCQ-capable devices, this function tries to return false, so as to |
| 3103 | * help keep the drives' internal queues full, whenever this helps the |
| 3104 | * device boost the throughput without causing any service-guarantee |
| 3105 | * issue. |
| 3106 | * |
| 3107 | * In more detail, the return value of this function is obtained by, |
| 3108 | * first, computing a number of boolean variables that take into |
| 3109 | * account throughput and service-guarantee issues, and, then, |
| 3110 | * combining these variables in a logical expression. Most of the |
| 3111 | * issues taken into account are not trivial. We discuss these issues |
| 3112 | * individually while introducing the variables. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3113 | */ |
| 3114 | static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) |
| 3115 | { |
| 3116 | struct bfq_data *bfqd = bfqq->bfqd; |
Paolo Valente | edaf942 | 2017-08-04 07:35:11 +0200 | [diff] [blame] | 3117 | bool rot_without_queueing = |
| 3118 | !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, |
| 3119 | bfqq_sequential_and_IO_bound, |
| 3120 | idling_boosts_thr, idling_boosts_thr_without_issues, |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3121 | idling_needed_for_service_guarantees, |
Paolo Valente | cfd6971 | 2017-04-12 18:23:15 +0200 | [diff] [blame] | 3122 | asymmetric_scenario; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3123 | |
| 3124 | if (bfqd->strict_guarantees) |
| 3125 | return true; |
| 3126 | |
| 3127 | /* |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3128 | * Idling is performed only if slice_idle > 0. In addition, we |
| 3129 | * do not idle if |
| 3130 | * (a) bfqq is async |
| 3131 | * (b) bfqq is in the idle io prio class: in this case we do |
| 3132 | * not idle because we want to minimize the bandwidth that |
| 3133 | * queues in this class can steal to higher-priority queues |
| 3134 | */ |
| 3135 | if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || |
| 3136 | bfq_class_idle(bfqq)) |
| 3137 | return false; |
| 3138 | |
Paolo Valente | edaf942 | 2017-08-04 07:35:11 +0200 | [diff] [blame] | 3139 | bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && |
| 3140 | bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); |
| 3141 | |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3142 | /* |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3143 | * The next variable takes into account the cases where idling |
| 3144 | * boosts the throughput. |
| 3145 | * |
Paolo Valente | e01eff0 | 2017-04-12 18:23:19 +0200 | [diff] [blame] | 3146 | * The value of the variable is computed considering, first, that |
| 3147 | * idling is virtually always beneficial for the throughput if: |
Paolo Valente | edaf942 | 2017-08-04 07:35:11 +0200 | [diff] [blame] | 3148 | * (a) the device is rotational and not NCQ-capable, or
| 3149 | * (b) regardless of the presence of NCQ, the device is rotational and |
| 3150 | * the request pattern for bfqq is I/O-bound and sequential, or |
| 3151 | * (c) regardless of whether it is rotational, the device is |
| 3152 | * not NCQ-capable and the request pattern for bfqq is |
| 3153 | * I/O-bound and sequential. |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3154 | * |
| 3155 | * Secondly, and in contrast to the above item (b), idling an |
| 3156 | * NCQ-capable flash-based device would not boost the |
Paolo Valente | e01eff0 | 2017-04-12 18:23:19 +0200 | [diff] [blame] | 3157 | * throughput even with sequential I/O; rather it would lower |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3158 | * the throughput in proportion to how fast the device |
| 3159 | * is. Accordingly, the next variable is true if any of the |
Paolo Valente | edaf942 | 2017-08-04 07:35:11 +0200 | [diff] [blame] | 3160 | * above conditions (a), (b) or (c) is true, and, in |
| 3161 | * particular, happens to be false if bfqd is an NCQ-capable |
| 3162 | * flash-based device. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3163 | */ |
Paolo Valente | edaf942 | 2017-08-04 07:35:11 +0200 | [diff] [blame] | 3164 | idling_boosts_thr = rot_without_queueing || |
| 3165 | ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && |
| 3166 | bfqq_sequential_and_IO_bound); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3167 | |
| 3168 | /* |
Paolo Valente | cfd6971 | 2017-04-12 18:23:15 +0200 | [diff] [blame] | 3169 | * The value of the next variable, |
| 3170 | * idling_boosts_thr_without_issues, is equal to that of |
| 3171 | * idling_boosts_thr, unless a special case holds. In this |
| 3172 | * special case, described below, idling may cause problems to |
| 3173 | * weight-raised queues. |
| 3174 | * |
| 3175 | * When the request pool is saturated (e.g., in the presence |
| 3176 | * of write hogs), if the processes associated with |
| 3177 | * non-weight-raised queues ask for requests at a lower rate, |
| 3178 | * then processes associated with weight-raised queues have a |
| 3179 | * higher probability to get a request from the pool |
| 3180 | * immediately (or at least soon) when they need one. Thus |
| 3181 | * they have a higher probability to actually get a fraction |
| 3182 | * of the device throughput proportional to their high |
| 3183 | * weight. This is especially true with NCQ-capable drives, |
| 3184 | * which enqueue several requests in advance, and further |
| 3185 | * reorder internally-queued requests. |
| 3186 | * |
| 3187 | * For this reason, we force to false the value of |
| 3188 | * idling_boosts_thr_without_issues if there are weight-raised |
| 3189 | * busy queues. In this case, and if bfqq is not weight-raised, |
| 3190 | * this guarantees that the device is not idled for bfqq (if, |
| 3191 | * instead, bfqq is weight-raised, then idling will be |
| 3192 | * guaranteed by another variable, see below). Combined with |
| 3193 | * the timestamping rules of BFQ (see [1] for details), this |
| 3194 | * behavior causes bfqq, and hence any sync non-weight-raised |
| 3195 | * queue, to get a lower number of requests served, and thus |
| 3196 | * to ask for a lower number of requests from the request |
| 3197 | * pool, before the busy weight-raised queues get served |
| 3198 | * again. This often mitigates starvation problems in the |
| 3199 | * presence of heavy write workloads and NCQ, thereby |
| 3200 | * guaranteeing a higher application and system responsiveness |
| 3201 | * in these hostile scenarios. |
| 3202 | */ |
| 3203 | idling_boosts_thr_without_issues = idling_boosts_thr && |
| 3204 | bfqd->wr_busy_queues == 0; |
| 3205 | |
| 3206 | /* |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3207 | * There is then a case where idling must be performed not |
| 3208 | * for throughput concerns, but to preserve service |
| 3209 | * guarantees. |
| 3210 | * |
| 3211 | * To introduce this case, we can note that allowing the drive |
| 3212 | * to enqueue more than one request at a time, and hence |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3213 | * delegating de facto final scheduling decisions to the |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3214 | * drive's internal scheduler, entails loss of control on the |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3215 | * actual request service order. In particular, the critical |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3216 | * situation is when requests from different processes happen |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3217 | * to be present, at the same time, in the internal queue(s) |
| 3218 | * of the drive. In such a situation, the drive, by deciding |
| 3219 | * the service order of the internally-queued requests, does |
| 3220 | * determine also the actual throughput distribution among |
| 3221 | * these processes. But the drive typically has no notion or |
| 3222 | * concern about per-process throughput distribution, and |
| 3223 | * makes its decisions only on a per-request basis. Therefore, |
| 3224 | * the service distribution enforced by the drive's internal |
| 3225 | * scheduler is likely to coincide with the desired |
| 3226 | * device-throughput distribution only in a completely |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3227 | * symmetric scenario where: |
| 3228 | * (i) each of these processes must get the same throughput as |
| 3229 | * the others; |
| 3230 | * (ii) all these processes have the same I/O pattern |
| 3231 | * (either sequential or random).
| 3232 | * In fact, in such a scenario, the drive will tend to treat |
| 3233 | * the requests of each of these processes in about the same |
| 3234 | * way as the requests of the others, and thus to provide |
| 3235 | * each of these processes with about the same throughput |
| 3236 | * (which is exactly the desired throughput distribution). In |
| 3237 | * contrast, in any asymmetric scenario, device idling is |
| 3238 | * certainly needed to guarantee that bfqq receives its |
| 3239 | * assigned fraction of the device throughput (see [1] for |
| 3240 | * details). |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3241 | * |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3242 | * We address this issue by controlling, actually, only the |
| 3243 | * symmetry sub-condition (i), i.e., provided that |
| 3244 | * sub-condition (i) holds, idling is not performed, |
| 3245 | * regardless of whether sub-condition (ii) holds. In other |
| 3246 | * words, only if sub-condition (i) holds, then idling is |
| 3247 | * allowed, and the device tends to be prevented from queueing |
| 3248 | * many requests, possibly of several processes. The reason |
| 3249 | * for not controlling also sub-condition (ii) is that we |
| 3250 | * exploit preemption to preserve guarantees in case of |
| 3251 | * symmetric scenarios, even if (ii) does not hold, as |
| 3252 | * explained in the next two paragraphs. |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3253 | * |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3254 | * Even if a queue, say Q, is expired when it remains idle, Q |
| 3255 | * can still preempt the new in-service queue if the next |
| 3256 | * request of Q arrives soon (see the comments on |
| 3257 | * bfq_bfqq_update_budg_for_activation). If all queues and |
| 3258 | * groups have the same weight, this form of preemption, |
| 3259 | * combined with the hole-recovery heuristic described in the |
| 3260 | * comments on function bfq_bfqq_update_budg_for_activation, |
| 3261 | * are enough to preserve a correct bandwidth distribution in |
| 3262 | * the mid term, even without idling. In fact, even if not |
| 3263 | * idling allows the internal queues of the device to contain |
| 3264 | * many requests, and thus to reorder requests, we can rather |
| 3265 | * safely assume that the internal scheduler still preserves a |
| 3266 | * minimum of mid-term fairness. The motivation for using |
| 3267 | * preemption instead of idling is that, by not idling, |
| 3268 | * service guarantees are preserved without minimally |
| 3269 | * sacrificing throughput. In other words, both a high |
| 3270 | * throughput and its desired distribution are obtained. |
| 3271 | * |
| 3272 | * More precisely, this preemption-based, idleless approach |
| 3273 | * provides fairness in terms of IOPS, and not sectors per |
| 3274 | * second. This can be seen with a simple example. Suppose |
| 3275 | * that there are two queues with the same weight, but that |
| 3276 | * the first queue receives requests of 8 sectors, while the |
| 3277 | * second queue receives requests of 1024 sectors. In |
| 3278 | * addition, suppose that each of the two queues contains at |
| 3279 | * most one request at a time, which implies that each queue |
| 3280 | * always remains idle after it is served. Finally, after |
| 3281 | * remaining idle, each queue receives very quickly a new |
| 3282 | * request. It follows that the two queues are served |
| 3283 | * alternatively, preempting each other if needed. This |
| 3284 | * implies that, although both queues have the same weight, |
| 3285 | * the queue with large requests receives a service that is |
| 3286 | * 1024/8 times as high as the service received by the other |
| 3287 | * queue. |
| 3288 | * |
| 3289 | * On the other hand, device idling is performed, and thus |
| 3290 | * pure sector-domain guarantees are provided, for the |
| 3291 | * following queues, which are likely to need stronger |
| 3292 | * throughput guarantees: weight-raised queues, and queues |
| 3293 | * with a higher weight than other queues. When such queues |
| 3294 | * are active, sub-condition (i) is false, which triggers |
| 3295 | * device idling. |
| 3296 | * |
| 3297 | * According to the above considerations, the next variable is |
| 3298 | * true (only) if sub-condition (i) holds. To compute the |
| 3299 | * value of this variable, we not only use the return value of |
| 3300 | * the function bfq_symmetric_scenario(), but also check |
| 3301 | * whether bfqq is being weight-raised, because |
| 3302 | * bfq_symmetric_scenario() does not take into account also |
| 3303 | * weight-raised queues (see comments on |
| 3304 | * bfq_weights_tree_add()). |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3305 | * |
| 3306 | * As a side note, it is worth considering that the above |
| 3307 | * device-idling countermeasures may however fail in the |
| 3308 | * following unlucky scenario: if idling is (correctly) |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3309 | * disabled in a time period during which all symmetry |
| 3310 | * sub-conditions hold, and hence the device is allowed to |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3311 | * enqueue many requests, but at some later point in time some |
| 3312 | * sub-condition stops holding, then it may become impossible
| 3313 | * to let requests be served in the desired order until all |
| 3314 | * the requests already queued in the device have been served. |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3315 | */ |
Paolo Valente | bf2b79e | 2017-04-12 18:23:18 +0200 | [diff] [blame] | 3316 | asymmetric_scenario = bfqq->wr_coeff > 1 || |
| 3317 | !bfq_symmetric_scenario(bfqd); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3318 | |
| 3319 | /* |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3320 | * Finally, there is a case where maximizing throughput is the |
| 3321 | * best choice even if it may cause unfairness toward |
| 3322 | * bfqq. Such a case is when bfqq became active in a burst of |
| 3323 | * queue activations. Queues that became active during a large |
| 3324 | * burst benefit only from throughput, as discussed in the |
| 3325 | * comments on bfq_handle_burst. Thus, if bfqq became active |
| 3326 | * in a burst and not idling the device maximizes throughput, |
| 3327 | * then the device must not be idled, because not idling the
| 3328 | * device provides bfqq and all other queues in the burst with |
| 3329 | * maximum benefit. Combining this and the above case, we can |
| 3330 | * now establish when idling is actually needed to preserve |
| 3331 | * service guarantees. |
| 3332 | */ |
| 3333 | idling_needed_for_service_guarantees = |
| 3334 | asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); |
| 3335 | |
| 3336 | /* |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3337 | * We have now all the components we need to compute the |
| 3338 | * return value of the function, which is true only if idling |
| 3339 | * either boosts the throughput (without issues), or is |
| 3340 | * necessary to preserve service guarantees. |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3341 | */ |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3342 | return idling_boosts_thr_without_issues || |
| 3343 | idling_needed_for_service_guarantees; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3344 | } |
| 3345 | |
| 3346 | /* |
| 3347 | * If the in-service queue is empty but the function bfq_bfqq_may_idle |
| 3348 | * returns true, then: |
| 3349 | * 1) the queue must remain in service and cannot be expired, and |
| 3350 | * 2) the device must be idled to wait for the possible arrival of a new |
| 3351 | * request for the queue. |
| 3352 | * See the comments on the function bfq_bfqq_may_idle for the reasons |
| 3353 | * why performing device idling is the best choice to boost the throughput |
| 3354 | * and preserve service guarantees when bfq_bfqq_may_idle itself |
| 3355 | * returns true. |
| 3356 | */ |
| 3357 | static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) |
| 3358 | { |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3359 | return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_may_idle(bfqq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3360 | } |
| 3361 | |
| 3362 | /* |
| 3363 | * Select a queue for service. If we have a current queue in service, |
| 3364 | * check whether to continue servicing it, or retrieve and set a new one. |
| 3365 | */ |
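/*
 * In short: if an in-service queue exists, it is kept (keep_queue) while
 * it has queued requests and enough budget to serve the next one, or
 * while the scheduler is idling on it waiting for new requests; it is
 * expired (expire) on budget exhaustion, budget timeout, or when no more
 * requests are expected; then a new in-service queue is selected
 * (new_queue) and checked in the same way (check_queue).
 */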
| 3366 | static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) |
| 3367 | { |
| 3368 | struct bfq_queue *bfqq; |
| 3369 | struct request *next_rq; |
| 3370 | enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT; |
| 3371 | |
| 3372 | bfqq = bfqd->in_service_queue; |
| 3373 | if (!bfqq) |
| 3374 | goto new_queue; |
| 3375 | |
| 3376 | bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); |
| 3377 | |
| 3378 | if (bfq_may_expire_for_budg_timeout(bfqq) && |
| 3379 | !bfq_bfqq_wait_request(bfqq) && |
| 3380 | !bfq_bfqq_must_idle(bfqq)) |
| 3381 | goto expire; |
| 3382 | |
| 3383 | check_queue: |
| 3384 | /* |
| 3385 | * This loop is rarely executed more than once. Even when it |
| 3386 | * happens, it is much more convenient to re-execute this loop |
| 3387 | * than to return NULL and trigger a new dispatch to get a |
| 3388 | * request served. |
| 3389 | */ |
| 3390 | next_rq = bfqq->next_rq; |
| 3391 | /* |
| 3392 | * If bfqq has requests queued and it has enough budget left to |
| 3393 | * serve them, keep the queue, otherwise expire it. |
| 3394 | */ |
| 3395 | if (next_rq) { |
| 3396 | if (bfq_serv_to_charge(next_rq, bfqq) > |
| 3397 | bfq_bfqq_budget_left(bfqq)) { |
| 3398 | /* |
| 3399 | * Expire the queue for budget exhaustion, |
| 3400 | * which makes sure that the next budget is |
| 3401 | * enough to serve the next request, even if |
| 3402 | * it comes from the fifo expired path. |
| 3403 | */ |
| 3404 | reason = BFQQE_BUDGET_EXHAUSTED; |
| 3405 | goto expire; |
| 3406 | } else { |
| 3407 | /* |
| 3408 | * The idle timer may be pending because we may |
| 3409 | * not disable disk idling even when a new request |
| 3410 | * arrives. |
| 3411 | */ |
| 3412 | if (bfq_bfqq_wait_request(bfqq)) { |
| 3413 | /* |
| 3414 | * If we get here: 1) at least one new |
| 3415 | * request has arrived but we have not |
| 3416 | * disabled the timer because the request |
| 3417 | * was too small, and 2) the block layer |
| 3418 | * has unplugged the device, causing the |
| 3419 | * dispatch to be invoked. |
| 3420 | * |
| 3421 | * Since the device is unplugged, now the |
| 3422 | * requests are probably large enough to |
| 3423 | * provide a reasonable throughput. |
| 3424 | * So we disable idling. |
| 3425 | */ |
| 3426 | bfq_clear_bfqq_wait_request(bfqq); |
| 3427 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3428 | bfqg_stats_update_idle_time(bfqq_group(bfqq)); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3429 | } |
| 3430 | goto keep_queue; |
| 3431 | } |
| 3432 | } |
| 3433 | |
| 3434 | /* |
| 3435 | * No requests pending. However, if the in-service queue is idling |
| 3436 | * for a new request, or has requests waiting for a completion and |
| 3437 | * may idle after their completion, then keep it anyway. |
| 3438 | */ |
| 3439 | if (bfq_bfqq_wait_request(bfqq) || |
| 3440 | (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { |
| 3441 | bfqq = NULL; |
| 3442 | goto keep_queue; |
| 3443 | } |
| 3444 | |
| 3445 | reason = BFQQE_NO_MORE_REQUESTS; |
| 3446 | expire: |
| 3447 | bfq_bfqq_expire(bfqd, bfqq, false, reason); |
| 3448 | new_queue: |
| 3449 | bfqq = bfq_set_in_service_queue(bfqd); |
| 3450 | if (bfqq) { |
| 3451 | bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); |
| 3452 | goto check_queue; |
| 3453 | } |
| 3454 | keep_queue: |
| 3455 | if (bfqq) |
| 3456 | bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); |
| 3457 | else |
| 3458 | bfq_log(bfqd, "select_queue: no queue returned"); |
| 3459 | |
| 3460 | return bfqq; |
| 3461 | } |
| 3462 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3463 | static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
| 3464 | { |
| 3465 | struct bfq_entity *entity = &bfqq->entity; |
| 3466 | |
| 3467 | if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ |
| 3468 | bfq_log_bfqq(bfqd, bfqq, |
| 3469 | "raising period dur %u/%u msec, old coeff %u, w %d(%d)", |
| 3470 | jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), |
| 3471 | jiffies_to_msecs(bfqq->wr_cur_max_time), |
| 3472 | bfqq->wr_coeff, |
| 3473 | bfqq->entity.weight, bfqq->entity.orig_weight); |
| 3474 | |
| 3475 | if (entity->prio_changed) |
| 3476 | bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); |
| 3477 | |
| 3478 | /* |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3479 | * If the queue was activated in a burst, or too much |
| 3480 | * time has elapsed from the beginning of this |
| 3481 | * weight-raising period, then end weight raising. |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3482 | */ |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3483 | if (bfq_bfqq_in_large_burst(bfqq)) |
| 3484 | bfq_bfqq_end_wr(bfqq); |
| 3485 | else if (time_is_before_jiffies(bfqq->last_wr_start_finish + |
| 3486 | bfqq->wr_cur_max_time)) { |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 3487 | if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || |
| 3488 | time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3489 | bfq_wr_duration(bfqd))) |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 3490 | bfq_bfqq_end_wr(bfqq); |
| 3491 | else { |
| 3492 | /* switch back to interactive wr */ |
| 3493 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
| 3494 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
| 3495 | bfqq->last_wr_start_finish = |
| 3496 | bfqq->wr_start_at_switch_to_srt; |
| 3497 | bfqq->entity.prio_changed = 1; |
| 3498 | } |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3499 | } |
| 3500 | } |
Paolo Valente | 431b17f | 2017-07-03 10:00:10 +0200 | [diff] [blame] | 3501 | /* |
| 3502 | * To improve latency (for this or other queues), immediately |
| 3503 | * update weight both if it must be raised and if it must be |
| 3504 | * lowered. Since the entity may be on some active tree here, and |
| 3505 | * might have a pending change of its ioprio class, invoke the |
| 3506 | * next function with the last parameter unset (see the |
| 3507 | * comments on the function). |
| 3508 | */ |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3509 | if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) |
Paolo Valente | 431b17f | 2017-07-03 10:00:10 +0200 | [diff] [blame] | 3510 | __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), |
| 3511 | entity, false); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3512 | } |
| 3513 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3514 | /* |
| 3515 | * Dispatch next request from bfqq. |
| 3516 | */ |
| 3517 | static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, |
| 3518 | struct bfq_queue *bfqq) |
| 3519 | { |
| 3520 | struct request *rq = bfqq->next_rq; |
| 3521 | unsigned long service_to_charge; |
| 3522 | |
| 3523 | service_to_charge = bfq_serv_to_charge(rq, bfqq); |
| 3524 | |
| 3525 | bfq_bfqq_served(bfqq, service_to_charge); |
| 3526 | |
| 3527 | bfq_dispatch_remove(bfqd->queue, rq); |
| 3528 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3529 | /* |
| 3530 | * If weight raising has to terminate for bfqq, then the next |
| 3531 | * function causes an immediate update of bfqq's weight, |
| 3532 | * without waiting for the next activation. As a consequence, on |
| 3533 | * expiration, bfqq will be timestamped as if it had never been |
| 3534 | * weight-raised during this service slot, even if it has |
| 3535 | * received part or even most of the service as a |
| 3536 | * weight-raised queue. This inflates bfqq's timestamps, which |
| 3537 | * is beneficial, as bfqq is then more willing to leave the |
| 3538 | * device immediately to possible other weight-raised queues. |
| 3539 | */ |
| 3540 | bfq_update_wr_data(bfqd, bfqq); |
| 3541 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3542 | /* |
| 3543 | * Expire bfqq, pretending that its budget expired, if bfqq |
| 3544 | * belongs to CLASS_IDLE and other queues are waiting for |
| 3545 | * service. |
| 3546 | */ |
| 3547 | if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq)) |
| 3548 | goto expire; |
| 3549 | |
| 3550 | return rq; |
| 3551 | |
| 3552 | expire: |
| 3553 | bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); |
| 3554 | return rq; |
| 3555 | } |
| 3556 | |
| 3557 | static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) |
| 3558 | { |
| 3559 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
| 3560 | |
| 3561 | /* |
| 3562 | * Avoiding the lock here: a race on bfqd->busy_queues should |
| 3563 | * cause at most a useless call to dispatch. |
| 3564 | */ |
| 3565 | return !list_empty_careful(&bfqd->dispatch) || |
| 3566 | bfqd->busy_queues > 0; |
| 3567 | } |
| 3568 | |
| 3569 | static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) |
| 3570 | { |
| 3571 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
| 3572 | struct request *rq = NULL; |
| 3573 | struct bfq_queue *bfqq = NULL; |
| 3574 | |
| 3575 | if (!list_empty(&bfqd->dispatch)) { |
| 3576 | rq = list_first_entry(&bfqd->dispatch, struct request, |
| 3577 | queuelist); |
| 3578 | list_del_init(&rq->queuelist); |
| 3579 | |
| 3580 | bfqq = RQ_BFQQ(rq); |
| 3581 | |
| 3582 | if (bfqq) { |
| 3583 | /* |
| 3584 | * Increment counters here, because this |
| 3585 | * dispatch does not follow the standard |
| 3586 | * dispatch flow (where counters are |
| 3587 | * incremented) |
| 3588 | */ |
| 3589 | bfqq->dispatched++; |
| 3590 | |
| 3591 | goto inc_in_driver_start_rq; |
| 3592 | } |
| 3593 | |
| 3594 | /* |
| 3595 | * We exploit the put_rq_private hook to decrement |
| 3596 | * rq_in_driver, but put_rq_private will not be |
| 3597 | * invoked on this request. So, to avoid an imbalance, |
| 3598 | * just start this request, without incrementing |
| 3599 | * rq_in_driver. As a negative consequence, |
| 3600 | * rq_in_driver is deceptively lower than it should be |
| 3601 | * while this request is in service. This may cause |
| 3602 | * bfq_schedule_dispatch to be invoked uselessly. |
| 3603 | * |
| 3604 | * As for implementing an exact solution, the |
| 3605 | * put_request hook, if defined, is probably invoked |
| 3606 | * also on this request. So, by exploiting this hook, |
| 3607 | * we could 1) increment rq_in_driver here, and 2) |
| 3608 | * decrement it in put_request. Such a solution would |
| 3609 | * let the value of the counter be always accurate, |
| 3610 | * but it would entail using an extra interface |
| 3611 | * function. This cost seems higher than the benefit, |
| 3612 | * since the frequency of non-elevator-private |
| 3613 | * requests is very low. |
| 3614 | */ |
| 3615 | goto start_rq; |
| 3616 | } |
| 3617 | |
| 3618 | bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); |
| 3619 | |
| 3620 | if (bfqd->busy_queues == 0) |
| 3621 | goto exit; |
| 3622 | |
| 3623 | /* |
| 3624 | * Force device to serve one request at a time if |
| 3625 | * strict_guarantees is true. Forcing this service scheme is |
| 3626 | * currently the ONLY way to guarantee that the request |
| 3627 | * service order enforced by the scheduler is respected by a |
| 3628 | * queueing device. Otherwise the device is free even to make |
| 3629 | * some unlucky request wait for as long as the device |
| 3630 | * wishes. |
| 3631 | * |
| 3632 | * Of course, serving one request at a time may cause loss of |
| 3633 | * throughput. |
| 3634 | */ |
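	/*
	 * (strict_guarantees is normally reachable as a sysfs tunable of
	 * the scheduler, e.g.
	 * /sys/block/<dev>/queue/iosched/strict_guarantees when bfq is
	 * the active elevator.)
	 */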
| 3635 | if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) |
| 3636 | goto exit; |
| 3637 | |
| 3638 | bfqq = bfq_select_queue(bfqd); |
| 3639 | if (!bfqq) |
| 3640 | goto exit; |
| 3641 | |
| 3642 | rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); |
| 3643 | |
| 3644 | if (rq) { |
| 3645 | inc_in_driver_start_rq: |
| 3646 | bfqd->rq_in_driver++; |
| 3647 | start_rq: |
| 3648 | rq->rq_flags |= RQF_STARTED; |
| 3649 | } |
| 3650 | exit: |
| 3651 | return rq; |
| 3652 | } |
| 3653 | |
| 3654 | static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) |
| 3655 | { |
| 3656 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
| 3657 | struct request *rq; |
| 3658 | |
| 3659 | spin_lock_irq(&bfqd->lock); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 3660 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3661 | rq = __bfq_dispatch_request(hctx); |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 3662 | spin_unlock_irq(&bfqd->lock); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3663 | |
| 3664 | return rq; |
| 3665 | } |
| 3666 | |
| 3667 | /* |
| 3668 | * Task holds one reference to the queue, dropped when task exits. Each rq |
| 3669 | * in-flight on this queue also holds a reference, dropped when rq is freed. |
| 3670 | * |
| 3671 | * Scheduler lock must be held here. Recall not to use bfqq after calling |
| 3672 | * this function on it. |
| 3673 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 3674 | void bfq_put_queue(struct bfq_queue *bfqq) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3675 | { |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3676 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 3677 | struct bfq_group *bfqg = bfqq_group(bfqq); |
| 3678 | #endif |
| 3679 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3680 | if (bfqq->bfqd) |
| 3681 | bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", |
| 3682 | bfqq, bfqq->ref); |
| 3683 | |
| 3684 | bfqq->ref--; |
| 3685 | if (bfqq->ref) |
| 3686 | return; |
| 3687 | |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3688 | if (bfq_bfqq_sync(bfqq)) |
| 3689 | /* |
| 3690 | * The fact that this queue is being destroyed does not |
| 3691 | * invalidate the fact that this queue may have been |
| 3692 | * activated during the current burst. As a consequence, |
| 3693 | * although the queue does not exist anymore, and hence |
| 3694 | * needs to be removed from the burst list if it is there, |
| 3695 | * the burst size must not be decremented. |
| 3696 | */ |
| 3697 | hlist_del_init(&bfqq->burst_list_node); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3698 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3699 | kmem_cache_free(bfq_pool, bfqq); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3700 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
Paolo Valente | 8f9bebc | 2017-06-05 10:11:15 +0200 | [diff] [blame] | 3701 | bfqg_and_blkg_put(bfqg); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3702 | #endif |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3703 | } |
| 3704 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 3705 | static void bfq_put_cooperator(struct bfq_queue *bfqq) |
| 3706 | { |
| 3707 | struct bfq_queue *__bfqq, *next; |
| 3708 | |
| 3709 | /* |
| 3710 | * If this queue was scheduled to merge with another queue, be |
| 3711 | * sure to drop the reference taken on that queue (and others in |
| 3712 | * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. |
| 3713 | */ |
| 3714 | __bfqq = bfqq->new_bfqq; |
| 3715 | while (__bfqq) { |
| 3716 | if (__bfqq == bfqq) |
| 3717 | break; |
| 3718 | next = __bfqq->new_bfqq; |
| 3719 | bfq_put_queue(__bfqq); |
| 3720 | __bfqq = next; |
| 3721 | } |
| 3722 | } |
| 3723 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3724 | static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
| 3725 | { |
| 3726 | if (bfqq == bfqd->in_service_queue) { |
| 3727 | __bfq_bfqq_expire(bfqd, bfqq); |
| 3728 | bfq_schedule_dispatch(bfqd); |
| 3729 | } |
| 3730 | |
| 3731 | bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); |
| 3732 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 3733 | bfq_put_cooperator(bfqq); |
| 3734 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3735 | bfq_put_queue(bfqq); /* release process reference */ |
| 3736 | } |
| 3737 | |
| 3738 | static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) |
| 3739 | { |
| 3740 | struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); |
| 3741 | struct bfq_data *bfqd; |
| 3742 | |
| 3743 | if (bfqq) |
| 3744 | bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ |
| 3745 | |
| 3746 | if (bfqq && bfqd) { |
| 3747 | unsigned long flags; |
| 3748 | |
| 3749 | spin_lock_irqsave(&bfqd->lock, flags); |
| 3750 | bfq_exit_bfqq(bfqd, bfqq); |
| 3751 | bic_set_bfqq(bic, NULL, is_sync); |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 3752 | spin_unlock_irqrestore(&bfqd->lock, flags); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3753 | } |
| 3754 | } |
| 3755 | |
| 3756 | static void bfq_exit_icq(struct io_cq *icq) |
| 3757 | { |
| 3758 | struct bfq_io_cq *bic = icq_to_bic(icq); |
| 3759 | |
| 3760 | bfq_exit_icq_bfqq(bic, true); |
| 3761 | bfq_exit_icq_bfqq(bic, false); |
| 3762 | } |
| 3763 | |
| 3764 | /* |
| 3765 | * Update the entity prio values; note that the new values will not |
| 3766 | * be used until the next (re)activation. |
| 3767 | */ |
| 3768 | static void |
| 3769 | bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) |
| 3770 | { |
| 3771 | struct task_struct *tsk = current; |
| 3772 | int ioprio_class; |
| 3773 | struct bfq_data *bfqd = bfqq->bfqd; |
| 3774 | |
| 3775 | if (!bfqd) |
| 3776 | return; |
| 3777 | |
| 3778 | ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); |
| 3779 | switch (ioprio_class) { |
| 3780 | default: |
| 3781 | dev_err(bfqq->bfqd->queue->backing_dev_info->dev, |
| 3782 | "bfq: bad prio class %d\n", ioprio_class); |
Bart Van Assche | fa393d1 | 2017-08-30 11:42:07 -0700 | [diff] [blame] | 3783 | /* fall through */ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3784 | case IOPRIO_CLASS_NONE: |
| 3785 | /* |
| 3786 | * No prio set, inherit CPU scheduling settings. |
| 3787 | */ |
| 3788 | bfqq->new_ioprio = task_nice_ioprio(tsk); |
| 3789 | bfqq->new_ioprio_class = task_nice_ioclass(tsk); |
| 3790 | break; |
| 3791 | case IOPRIO_CLASS_RT: |
| 3792 | bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); |
| 3793 | bfqq->new_ioprio_class = IOPRIO_CLASS_RT; |
| 3794 | break; |
| 3795 | case IOPRIO_CLASS_BE: |
| 3796 | bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); |
| 3797 | bfqq->new_ioprio_class = IOPRIO_CLASS_BE; |
| 3798 | break; |
| 3799 | case IOPRIO_CLASS_IDLE: |
| 3800 | bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; |
| 3801 | bfqq->new_ioprio = 7; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3802 | break; |
| 3803 | } |
| 3804 | |
| 3805 | if (bfqq->new_ioprio >= IOPRIO_BE_NR) { |
| 3806 | pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", |
| 3807 | bfqq->new_ioprio); |
| 3808 | bfqq->new_ioprio = IOPRIO_BE_NR; |
| 3809 | } |
| 3810 | |
| 3811 | bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); |
| 3812 | bfqq->entity.prio_changed = 1; |
| 3813 | } |
| 3814 | |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 3815 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, |
| 3816 | struct bio *bio, bool is_sync, |
| 3817 | struct bfq_io_cq *bic); |
| 3818 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3819 | static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) |
| 3820 | { |
| 3821 | struct bfq_data *bfqd = bic_to_bfqd(bic); |
| 3822 | struct bfq_queue *bfqq; |
| 3823 | int ioprio = bic->icq.ioc->ioprio; |
| 3824 | |
| 3825 | /* |
| 3826 | * This condition may trigger on a newly created bic, be sure to |
| 3827 | * drop the lock before returning. |
| 3828 | */ |
| 3829 | if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) |
| 3830 | return; |
| 3831 | |
| 3832 | bic->ioprio = ioprio; |
| 3833 | |
| 3834 | bfqq = bic_to_bfqq(bic, false); |
| 3835 | if (bfqq) { |
| 3836 | /* release process reference on this queue */ |
| 3837 | bfq_put_queue(bfqq); |
| 3838 | bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); |
| 3839 | bic_set_bfqq(bic, bfqq, false); |
| 3840 | } |
| 3841 | |
| 3842 | bfqq = bic_to_bfqq(bic, true); |
| 3843 | if (bfqq) |
| 3844 | bfq_set_next_ioprio_data(bfqq, bic); |
| 3845 | } |
| 3846 | |
| 3847 | static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 3848 | struct bfq_io_cq *bic, pid_t pid, int is_sync) |
| 3849 | { |
| 3850 | RB_CLEAR_NODE(&bfqq->entity.rb_node); |
| 3851 | INIT_LIST_HEAD(&bfqq->fifo); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3852 | INIT_HLIST_NODE(&bfqq->burst_list_node); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3853 | |
| 3854 | bfqq->ref = 0; |
| 3855 | bfqq->bfqd = bfqd; |
| 3856 | |
| 3857 | if (bic) |
| 3858 | bfq_set_next_ioprio_data(bfqq, bic); |
| 3859 | |
| 3860 | if (is_sync) { |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3861 | /* |
| 3862 | * No need to mark as has_short_ttime if in |
| 3863 | * idle_class, because no device idling is performed |
| 3864 | * for queues in the idle class. |
| 3865 | */ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3866 | if (!bfq_class_idle(bfqq)) |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 3867 | /* tentatively mark as has_short_ttime */ |
| 3868 | bfq_mark_bfqq_has_short_ttime(bfqq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3869 | bfq_mark_bfqq_sync(bfqq); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 3870 | bfq_mark_bfqq_just_created(bfqq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3871 | } else |
| 3872 | bfq_clear_bfqq_sync(bfqq); |
| 3873 | |
| 3874 | /* set end request to minus infinity from now */ |
| 3875 | bfqq->ttime.last_end_request = ktime_get_ns() + 1; |
| 3876 | |
| 3877 | bfq_mark_bfqq_IO_bound(bfqq); |
| 3878 | |
| 3879 | bfqq->pid = pid; |
| 3880 | |
| 3881 | /* Tentative initial value to trade off between thr and lat */ |
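	/*
	 * For instance, if bfq_max_budget(bfqd) currently evaluates to
	 * 16384 sectors, a newly created queue starts with a budget of
	 * 10922 sectors, i.e. two thirds of the device-wide maximum.
	 */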
Paolo Valente | 54b6045 | 2017-04-12 18:23:09 +0200 | [diff] [blame] | 3882 | bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3883 | bfqq->budget_timeout = bfq_smallest_from_now(); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3884 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3885 | bfqq->wr_coeff = 1; |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 3886 | bfqq->last_wr_start_finish = jiffies; |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 3887 | bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 3888 | bfqq->split_time = bfq_smallest_from_now(); |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 3889 | |
| 3890 | /* |
| 3891 | * Set to the value for which bfqq will not be deemed as |
| 3892 | * soft rt when it becomes backlogged. |
| 3893 | */ |
| 3894 | bfqq->soft_rt_next_start = bfq_greatest_from_now(); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 3895 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3896 | /* first request is almost certainly seeky */ |
| 3897 | bfqq->seek_history = 1; |
| 3898 | } |
| 3899 | |
| 3900 | static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3901 | struct bfq_group *bfqg, |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3902 | int ioprio_class, int ioprio) |
| 3903 | { |
| 3904 | switch (ioprio_class) { |
| 3905 | case IOPRIO_CLASS_RT: |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3906 | return &bfqg->async_bfqq[0][ioprio]; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3907 | case IOPRIO_CLASS_NONE: |
| 3908 | ioprio = IOPRIO_NORM; |
| 3909 | /* fall through */ |
| 3910 | case IOPRIO_CLASS_BE: |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3911 | return &bfqg->async_bfqq[1][ioprio]; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3912 | case IOPRIO_CLASS_IDLE: |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3913 | return &bfqg->async_idle_bfqq; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3914 | default: |
| 3915 | return NULL; |
| 3916 | } |
| 3917 | } |
| 3918 | |
| 3919 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, |
| 3920 | struct bio *bio, bool is_sync, |
| 3921 | struct bfq_io_cq *bic) |
| 3922 | { |
| 3923 | const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); |
| 3924 | const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); |
| 3925 | struct bfq_queue **async_bfqq = NULL; |
| 3926 | struct bfq_queue *bfqq; |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3927 | struct bfq_group *bfqg; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3928 | |
| 3929 | rcu_read_lock(); |
| 3930 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3931 | bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); |
| 3932 | if (!bfqg) { |
| 3933 | bfqq = &bfqd->oom_bfqq; |
| 3934 | goto out; |
| 3935 | } |
| 3936 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3937 | if (!is_sync) { |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3938 | async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3939 | ioprio); |
| 3940 | bfqq = *async_bfqq; |
| 3941 | if (bfqq) |
| 3942 | goto out; |
| 3943 | } |
| 3944 | |
| 3945 | bfqq = kmem_cache_alloc_node(bfq_pool, |
| 3946 | GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, |
| 3947 | bfqd->queue->node); |
| 3948 | |
| 3949 | if (bfqq) { |
| 3950 | bfq_init_bfqq(bfqd, bfqq, bic, current->pid, |
| 3951 | is_sync); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3952 | bfq_init_entity(&bfqq->entity, bfqg); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3953 | bfq_log_bfqq(bfqd, bfqq, "allocated"); |
| 3954 | } else { |
| 3955 | bfqq = &bfqd->oom_bfqq; |
| 3956 | bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); |
| 3957 | goto out; |
| 3958 | } |
| 3959 | |
| 3960 | /* |
| 3961 | * Pin the queue now that it's allocated, scheduler exit will |
| 3962 | * prune it. |
| 3963 | */ |
| 3964 | if (async_bfqq) { |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 3965 | bfqq->ref++; /* |
| 3966 | * Extra group reference, w.r.t. sync |
| 3967 | * queue. This extra reference is removed |
| 3968 | * only if bfqq->bfqg disappears, to |
| 3969 | * guarantee that this queue is not freed |
| 3970 | * until its group goes away. |
| 3971 | */ |
| 3972 | bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 3973 | bfqq, bfqq->ref); |
| 3974 | *async_bfqq = bfqq; |
| 3975 | } |
| 3976 | |
| 3977 | out: |
| 3978 | bfqq->ref++; /* get a process reference to this queue */ |
| 3979 | bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); |
| 3980 | rcu_read_unlock(); |
| 3981 | return bfqq; |
| 3982 | } |
| 3983 | |
| 3984 | static void bfq_update_io_thinktime(struct bfq_data *bfqd, |
| 3985 | struct bfq_queue *bfqq) |
| 3986 | { |
| 3987 | struct bfq_ttime *ttime = &bfqq->ttime; |
| 3988 | u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; |
| 3989 | |
| 3990 | elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); |
| 3991 | |
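	/*
	 * The updates below are exponentially weighted moving averages
	 * with a 7/8 decay: each sample adds 256 (a fixed-point weight)
	 * to ttime_samples and 256*elapsed to ttime_total, so the scale
	 * factor cancels out in ttime_mean (the +128 only rounds the
	 * division). E.g., starting from zero, one sample with
	 * elapsed = 1000 ns gives samples = 32, total = 32000 and a
	 * mean of about 1004 ns, i.e. essentially the sample itself.
	 */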
| 3992 | ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8; |
| 3993 | ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); |
| 3994 | ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, |
| 3995 | ttime->ttime_samples); |
| 3996 | } |
| 3997 | |
| 3998 | static void |
| 3999 | bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 4000 | struct request *rq) |
| 4001 | { |
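	/*
	 * seek_history is a per-queue bit history: each new request
	 * shifts in one bit, set when the request looks seeky, i.e. its
	 * distance from the previous request exceeds BFQQ_SEEK_THR (and,
	 * on non-rotational devices, only if the request is also small).
	 * BFQQ_SEEKY() then classifies the queue from this history,
	 * typically by counting how many of the recorded bits are set.
	 */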
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4002 | bfqq->seek_history <<= 1; |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 4003 | bfqq->seek_history |= |
| 4004 | get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4005 | (!blk_queue_nonrot(bfqd->queue) || |
| 4006 | blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); |
| 4007 | } |
| 4008 | |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4009 | static void bfq_update_has_short_ttime(struct bfq_data *bfqd, |
| 4010 | struct bfq_queue *bfqq, |
| 4011 | struct bfq_io_cq *bic) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4012 | { |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4013 | bool has_short_ttime = true; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4014 | |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4015 | /* |
| 4016 | * No need to update has_short_ttime if bfqq is async or in |
| 4017 | * idle io prio class, or if bfq_slice_idle is zero, because |
| 4018 | * no device idling is performed for bfqq in this case. |
| 4019 | */ |
| 4020 | if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || |
| 4021 | bfqd->bfq_slice_idle == 0) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4022 | return; |
| 4023 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4024 | /* Idle window just restored, statistics are meaningless. */ |
| 4025 | if (time_is_after_eq_jiffies(bfqq->split_time + |
| 4026 | bfqd->bfq_wr_min_idle_time)) |
| 4027 | return; |
| 4028 | |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4029 | /* Think time is infinite if no process is linked to |
| 4030 | * bfqq. Otherwise check average think time to |
| 4031 | * decide whether to mark as has_short_ttime |
| 4032 | */ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4033 | if (atomic_read(&bic->icq.ioc->active_ref) == 0 || |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4034 | (bfq_sample_valid(bfqq->ttime.ttime_samples) && |
| 4035 | bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) |
| 4036 | has_short_ttime = false; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4037 | |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4038 | bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", |
| 4039 | has_short_ttime); |
| 4040 | |
| 4041 | if (has_short_ttime) |
| 4042 | bfq_mark_bfqq_has_short_ttime(bfqq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4043 | else |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4044 | bfq_clear_bfqq_has_short_ttime(bfqq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4045 | } |
| 4046 | |
| 4047 | /* |
| 4048 | * Called when a new fs request (rq) is added to bfqq. Check if there's |
| 4049 | * something we should do about it. |
| 4050 | */ |
| 4051 | static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
| 4052 | struct request *rq) |
| 4053 | { |
| 4054 | struct bfq_io_cq *bic = RQ_BIC(rq); |
| 4055 | |
| 4056 | if (rq->cmd_flags & REQ_META) |
| 4057 | bfqq->meta_pending++; |
| 4058 | |
| 4059 | bfq_update_io_thinktime(bfqd, bfqq); |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4060 | bfq_update_has_short_ttime(bfqd, bfqq, bic); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4061 | bfq_update_io_seektime(bfqd, bfqq, rq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4062 | |
| 4063 | bfq_log_bfqq(bfqd, bfqq, |
Paolo Valente | d5be3fe | 2017-08-04 07:35:10 +0200 | [diff] [blame] | 4064 | "rq_enqueued: has_short_ttime=%d (seeky %d)", |
| 4065 | bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4066 | |
| 4067 | bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); |
| 4068 | |
| 4069 | if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { |
| 4070 | bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && |
| 4071 | blk_rq_sectors(rq) < 32; |
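		/* (32 sectors, i.e. 16 KiB, given 512-byte block-layer sectors) */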
| 4072 | bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); |
| 4073 | |
| 4074 | /* |
| 4075 | * There is just this request queued: if the request |
| 4076 | * is small and the queue is not to be expired, then |
| 4077 | * just exit. |
| 4078 | * |
| 4079 | * In this way, if the device is being idled to wait |
| 4080 | * for a new request from the in-service queue, we |
| 4081 | * avoid unplugging the device and committing the |
| 4082 | * device to serve just a small request. On the |
| 4083 | * contrary, we wait for the block layer to decide |
| 4084 | * when to unplug the device: hopefully, new requests |
| 4085 | * will be merged to this one quickly, then the device |
| 4086 | * will be unplugged and larger requests will be |
| 4087 | * dispatched. |
| 4088 | */ |
| 4089 | if (small_req && !budget_timeout) |
| 4090 | return; |
| 4091 | |
| 4092 | /* |
| 4093 | * A large enough request arrived, or the queue is to |
| 4094 | * be expired: in both cases disk idling is to be |
| 4095 | * stopped, so clear wait_request flag and reset |
| 4096 | * timer. |
| 4097 | */ |
| 4098 | bfq_clear_bfqq_wait_request(bfqq); |
| 4099 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4100 | bfqg_stats_update_idle_time(bfqq_group(bfqq)); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4101 | |
| 4102 | /* |
| 4103 | * The queue is not empty, because a new request just |
| 4104 | * arrived. Hence we can safely expire the queue, in |
| 4105 | * case of budget timeout, without risking that the |
| 4106 | * timestamps of the queue are not updated correctly. |
| 4107 | * See [1] for more details. |
| 4108 | */ |
| 4109 | if (budget_timeout) |
| 4110 | bfq_bfqq_expire(bfqd, bfqq, false, |
| 4111 | BFQQE_BUDGET_TIMEOUT); |
| 4112 | } |
| 4113 | } |
| 4114 | |
| 4115 | static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) |
| 4116 | { |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4117 | struct bfq_queue *bfqq = RQ_BFQQ(rq), |
| 4118 | *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); |
| 4119 | |
| 4120 | if (new_bfqq) { |
| 4121 | if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) |
| 4122 | new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); |
| 4123 | /* |
| 4124 | * Release the request's reference to the old bfqq |
| 4125 | * and make sure one is taken to the shared queue. |
| 4126 | */ |
| 4127 | new_bfqq->allocated++; |
| 4128 | bfqq->allocated--; |
| 4129 | new_bfqq->ref++; |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4130 | bfq_clear_bfqq_just_created(bfqq); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4131 | /* |
| 4132 | * If the bic associated with the process |
| 4133 | * issuing this request still points to bfqq |
| 4134 | * (and thus has not been already redirected |
| 4135 | * to new_bfqq or even some other bfq_queue), |
| 4136 | * then complete the merge and redirect it to |
| 4137 | * new_bfqq. |
| 4138 | */ |
| 4139 | if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) |
| 4140 | bfq_merge_bfqqs(bfqd, RQ_BIC(rq), |
| 4141 | bfqq, new_bfqq); |
| 4142 | /* |
| 4143 | * rq is about to be enqueued into new_bfqq, |
| 4144 | * release rq reference on bfqq |
| 4145 | */ |
| 4146 | bfq_put_queue(bfqq); |
| 4147 | rq->elv.priv[1] = new_bfqq; |
| 4148 | bfqq = new_bfqq; |
| 4149 | } |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4150 | |
| 4151 | bfq_add_request(rq); |
| 4152 | |
| 4153 | rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; |
| 4154 | list_add_tail(&rq->queuelist, &bfqq->fifo); |
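	/*
	 * Besides its position in the sector-sorted tree (set up by
	 * bfq_add_request), the request also gets a FIFO deadline here;
	 * bfq_fifo_expire[] is indexed by direction (async/sync), and a
	 * request whose deadline has elapsed is preferentially picked
	 * from the fifo instead of in sector order (the "fifo expired"
	 * path mentioned in bfq_select_queue).
	 */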
| 4155 | |
| 4156 | bfq_rq_enqueued(bfqd, bfqq, rq); |
| 4157 | } |
| 4158 | |
| 4159 | static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, |
| 4160 | bool at_head) |
| 4161 | { |
| 4162 | struct request_queue *q = hctx->queue; |
| 4163 | struct bfq_data *bfqd = q->elevator->elevator_data; |
| 4164 | |
| 4165 | spin_lock_irq(&bfqd->lock); |
| 4166 | if (blk_mq_sched_try_insert_merge(q, rq)) { |
| 4167 | spin_unlock_irq(&bfqd->lock); |
| 4168 | return; |
| 4169 | } |
| 4170 | |
| 4171 | spin_unlock_irq(&bfqd->lock); |
| 4172 | |
| 4173 | blk_mq_sched_request_inserted(rq); |
| 4174 | |
| 4175 | spin_lock_irq(&bfqd->lock); |
| 4176 | if (at_head || blk_rq_is_passthrough(rq)) { |
| 4177 | if (at_head) |
| 4178 | list_add(&rq->queuelist, &bfqd->dispatch); |
| 4179 | else |
| 4180 | list_add_tail(&rq->queuelist, &bfqd->dispatch); |
| 4181 | } else { |
| 4182 | __bfq_insert_request(bfqd, rq); |
| 4183 | |
| 4184 | if (rq_mergeable(rq)) { |
| 4185 | elv_rqhash_add(q, rq); |
| 4186 | if (!q->last_merge) |
| 4187 | q->last_merge = rq; |
| 4188 | } |
| 4189 | } |
| 4190 | |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 4191 | spin_unlock_irq(&bfqd->lock); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4192 | } |
| 4193 | |
| 4194 | static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, |
| 4195 | struct list_head *list, bool at_head) |
| 4196 | { |
| 4197 | while (!list_empty(list)) { |
| 4198 | struct request *rq; |
| 4199 | |
| 4200 | rq = list_first_entry(list, struct request, queuelist); |
| 4201 | list_del_init(&rq->queuelist); |
| 4202 | bfq_insert_request(hctx, rq, at_head); |
| 4203 | } |
| 4204 | } |
| 4205 | |
| 4206 | static void bfq_update_hw_tag(struct bfq_data *bfqd) |
| 4207 | { |
| 4208 | bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, |
| 4209 | bfqd->rq_in_driver); |
| 4210 | |
| 4211 | if (bfqd->hw_tag == 1) |
| 4212 | return; |
| 4213 | |
| 4214 | /* |
| 4215 | * This sample is valid if the number of outstanding requests |
| 4216 | * is large enough to allow a queueing behavior. Note that the |
| 4217 | * sum is not exact, as it's not taking into account deactivated |
| 4218 | * requests. |
| 4219 | */ |
| 4220 | if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) |
| 4221 | return; |
| 4222 | |
| 4223 | if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) |
| 4224 | return; |
| 4225 | |
| 4226 | bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; |
| 4227 | bfqd->max_rq_in_driver = 0; |
| 4228 | bfqd->hw_tag_samples = 0; |
| 4229 | } |
| 4230 | |
| 4231 | static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) |
| 4232 | { |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 4233 | u64 now_ns; |
| 4234 | u32 delta_us; |
| 4235 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4236 | bfq_update_hw_tag(bfqd); |
| 4237 | |
| 4238 | bfqd->rq_in_driver--; |
| 4239 | bfqq->dispatched--; |
| 4240 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4241 | if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { |
| 4242 | /* |
| 4243 | * Set budget_timeout (which we overload to store the |
| 4244 | * time at which the queue remains with no backlog and |
| 4245 | * no outstanding request; used by the weight-raising |
| 4246 | * mechanism). |
| 4247 | */ |
| 4248 | bfqq->budget_timeout = jiffies; |
Arianna Avanzini | 1de0c4c | 2017-04-12 18:23:17 +0200 | [diff] [blame] | 4249 | |
| 4250 | bfq_weights_tree_remove(bfqd, &bfqq->entity, |
| 4251 | &bfqd->queue_weights_tree); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4252 | } |
| 4253 | |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 4254 | now_ns = ktime_get_ns(); |
| 4255 | |
| 4256 | bfqq->ttime.last_end_request = now_ns; |
| 4257 | |
| 4258 | /* |
| 4259 | * Using us instead of ns, to get a reasonable precision in |
| 4260 | * computing the rate in the next check. |
| 4261 | */ |
| 4262 | delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); |
| 4263 | |
| 4264 | /* |
| 4265 | * If the request took rather long to complete, and, according |
| 4266 | * to the maximum request size recorded, this completion latency |
| 4267 | * implies that the request was certainly served at a very low |
| 4268 | * rate (less than 1M sectors/sec), then the whole observation |
| 4269 | * interval that lasts up to this time instant cannot be a |
| 4270 | * valid time interval for computing a new peak rate. Invoke |
| 4271 | * bfq_update_rate_reset to have the following three steps |
| 4272 | * taken: |
| 4273 | * - close the observation interval at the last (previous) |
| 4274 | * request dispatch or completion |
| 4275 | * - compute rate, if possible, for that observation interval |
| 4276 | * - reset to zero samples, which will trigger a proper |
| 4277 | * re-initialization of the observation interval on next |
| 4278 | * dispatch |
| 4279 | */ |
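	/*
	 * Unpacking the fixed-point check below: dividing both sides by
	 * 2^BFQ_RATE_SHIFT, the condition becomes
	 * last_rq_max_size / delta_us < 1/1024 sectors per microsecond,
	 * which matches the "1M sectors/sec" bound mentioned above
	 * (roughly 500 MB/s with 512-byte sectors).
	 */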
| 4280 | if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && |
| 4281 | (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < |
| 4282 | 1UL<<(BFQ_RATE_SHIFT - 10)) |
| 4283 | bfq_update_rate_reset(bfqd, NULL); |
| 4284 | bfqd->last_completion = now_ns; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4285 | |
| 4286 | /* |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 4287 | * If we are waiting to discover whether the request pattern |
| 4288 | * of the task associated with the queue is actually |
| 4289 | * isochronous, and both requisites for this condition to hold |
| 4290 | * are now satisfied, then compute soft_rt_next_start (see the |
| 4291 | * comments on the function bfq_bfqq_softrt_next_start()). We |
| 4292 | * schedule this delayed check when bfqq expires, if it still |
| 4293 | * has in-flight requests. |
| 4294 | */ |
| 4295 | if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && |
| 4296 | RB_EMPTY_ROOT(&bfqq->sort_list)) |
| 4297 | bfqq->soft_rt_next_start = |
| 4298 | bfq_bfqq_softrt_next_start(bfqd, bfqq); |
| 4299 | |
| 4300 | /* |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4301 | * If this is the in-service queue, check if it needs to be expired, |
| 4302 | * or if we want to idle in case it has no pending requests. |
| 4303 | */ |
| 4304 | if (bfqd->in_service_queue == bfqq) { |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4305 | if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) { |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4306 | bfq_arm_slice_timer(bfqd); |
| 4307 | return; |
| 4308 | } else if (bfq_may_expire_for_budg_timeout(bfqq)) |
| 4309 | bfq_bfqq_expire(bfqd, bfqq, false, |
| 4310 | BFQQE_BUDGET_TIMEOUT); |
| 4311 | else if (RB_EMPTY_ROOT(&bfqq->sort_list) && |
| 4312 | (bfqq->dispatched == 0 || |
| 4313 | !bfq_bfqq_may_idle(bfqq))) |
| 4314 | bfq_bfqq_expire(bfqd, bfqq, false, |
| 4315 | BFQQE_NO_MORE_REQUESTS); |
| 4316 | } |
Hou Tao | 3f7cb4f | 2017-07-11 21:58:15 +0800 | [diff] [blame] | 4317 | |
| 4318 | if (!bfqd->rq_in_driver) |
| 4319 | bfq_schedule_dispatch(bfqd); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4320 | } |
| 4321 | |
| 4322 | static void bfq_put_rq_priv_body(struct bfq_queue *bfqq) |
| 4323 | { |
| 4324 | bfqq->allocated--; |
| 4325 | |
| 4326 | bfq_put_queue(bfqq); |
| 4327 | } |
| 4328 | |
Christoph Hellwig | 7b9e936 | 2017-06-16 18:15:21 +0200 | [diff] [blame] | 4329 | static void bfq_finish_request(struct request *rq) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4330 | { |
Christoph Hellwig | 5bbf4e5 | 2017-06-16 18:15:26 +0200 | [diff] [blame] | 4331 | struct bfq_queue *bfqq; |
| 4332 | struct bfq_data *bfqd; |
| 4333 | |
| 4334 | if (!rq->elv.icq) |
| 4335 | return; |
| 4336 | |
| 4337 | bfqq = RQ_BFQQ(rq); |
| 4338 | bfqd = bfqq->bfqd; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4339 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4340 | if (rq->rq_flags & RQF_STARTED) |
| 4341 | bfqg_stats_update_completion(bfqq_group(bfqq), |
| 4342 | rq_start_time_ns(rq), |
| 4343 | rq_io_start_time_ns(rq), |
| 4344 | rq->cmd_flags); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4345 | |
| 4346 | if (likely(rq->rq_flags & RQF_STARTED)) { |
| 4347 | unsigned long flags; |
| 4348 | |
| 4349 | spin_lock_irqsave(&bfqd->lock, flags); |
| 4350 | |
| 4351 | bfq_completed_request(bfqq, bfqd); |
| 4352 | bfq_put_rq_priv_body(bfqq); |
| 4353 | |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 4354 | spin_unlock_irqrestore(&bfqd->lock, flags); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4355 | } else { |
| 4356 | /* |
| 4357 | * Request rq may be still/already in the scheduler, |
| 4358 | * in which case we need to remove it. And we cannot |
| 4359 | * defer such a check and removal, to avoid |
| 4360 | * inconsistencies in the time interval from the end |
| 4361 | * of this function to the start of the deferred work. |
| 4362 | * This situation seems to occur only in process |
| 4363 | * context, as a consequence of a merge. In the |
| 4364 | * current version of the code, this implies that the |
| 4365 | * lock is held. |
| 4366 | */ |
| 4367 | |
| 4368 | if (!RB_EMPTY_NODE(&rq->rb_node)) |
Christoph Hellwig | 7b9e936 | 2017-06-16 18:15:21 +0200 | [diff] [blame] | 4369 | bfq_remove_request(rq->q, rq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4370 | bfq_put_rq_priv_body(bfqq); |
| 4371 | } |
| 4372 | |
| 4373 | rq->elv.priv[0] = NULL; |
| 4374 | rq->elv.priv[1] = NULL; |
| 4375 | } |
| 4376 | |
| 4377 | /* |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4378 | * Returns NULL if a new bfqq should be allocated, or the old bfqq if this |
| 4379 | * was the last process referring to that bfqq. |
| 4380 | */ |
| 4381 | static struct bfq_queue * |
| 4382 | bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) |
| 4383 | { |
| 4384 | bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); |
| 4385 | |
| 4386 | if (bfqq_process_refs(bfqq) == 1) { |
| 4387 | bfqq->pid = current->pid; |
| 4388 | bfq_clear_bfqq_coop(bfqq); |
| 4389 | bfq_clear_bfqq_split_coop(bfqq); |
| 4390 | return bfqq; |
| 4391 | } |
| 4392 | |
| 4393 | bic_set_bfqq(bic, NULL, 1); |
| 4394 | |
| 4395 | bfq_put_cooperator(bfqq); |
| 4396 | |
| 4397 | bfq_put_queue(bfqq); |
| 4398 | return NULL; |
| 4399 | } |
| 4400 | |
| 4401 | static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, |
| 4402 | struct bfq_io_cq *bic, |
| 4403 | struct bio *bio, |
| 4404 | bool split, bool is_sync, |
| 4405 | bool *new_queue) |
| 4406 | { |
| 4407 | struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); |
| 4408 | |
| 4409 | if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) |
| 4410 | return bfqq; |
| 4411 | |
| 4412 | if (new_queue) |
| 4413 | *new_queue = true; |
| 4414 | |
| 4415 | if (bfqq) |
| 4416 | bfq_put_queue(bfqq); |
| 4417 | bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); |
| 4418 | |
| 4419 | bic_set_bfqq(bic, bfqq, is_sync); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4420 | if (split && is_sync) { |
| 4421 | if ((bic->was_in_burst_list && bfqd->large_burst) || |
| 4422 | bic->saved_in_large_burst) |
| 4423 | bfq_mark_bfqq_in_large_burst(bfqq); |
| 4424 | else { |
| 4425 | bfq_clear_bfqq_in_large_burst(bfqq); |
| 4426 | if (bic->was_in_burst_list) |
| 4427 | hlist_add_head(&bfqq->burst_list_node, |
| 4428 | &bfqd->burst_list); |
| 4429 | } |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4430 | bfqq->split_time = jiffies; |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4431 | } |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4432 | |
| 4433 | return bfqq; |
| 4434 | } |
| 4435 | |
| 4436 | /* |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4437 | * Allocate bfq data structures associated with this request. |
| 4438 | */ |
Christoph Hellwig | 5bbf4e5 | 2017-06-16 18:15:26 +0200 | [diff] [blame] | 4439 | static void bfq_prepare_request(struct request *rq, struct bio *bio) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4440 | { |
Christoph Hellwig | 5bbf4e5 | 2017-06-16 18:15:26 +0200 | [diff] [blame] | 4441 | struct request_queue *q = rq->q; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4442 | struct bfq_data *bfqd = q->elevator->elevator_data; |
Christoph Hellwig | 9f21073 | 2017-06-16 18:15:24 +0200 | [diff] [blame] | 4443 | struct bfq_io_cq *bic; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4444 | const int is_sync = rq_is_sync(rq); |
| 4445 | struct bfq_queue *bfqq; |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4446 | bool new_queue = false; |
Paolo Valente | 13c931b | 2017-06-27 12:30:47 -0600 | [diff] [blame] | 4447 | bool bfqq_already_existing = false, split = false; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4448 | |
Christoph Hellwig | 9f21073 | 2017-06-16 18:15:24 +0200 | [diff] [blame] | 4449 | if (!rq->elv.icq) |
Christoph Hellwig | 5bbf4e5 | 2017-06-16 18:15:26 +0200 | [diff] [blame] | 4450 | return; |
Christoph Hellwig | 9f21073 | 2017-06-16 18:15:24 +0200 | [diff] [blame] | 4451 | bic = icq_to_bic(rq->elv.icq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4452 | |
Christoph Hellwig | 9f21073 | 2017-06-16 18:15:24 +0200 | [diff] [blame] | 4453 | spin_lock_irq(&bfqd->lock); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4454 | |
Colin Ian King | 8c9ff1a | 2017-04-20 15:07:18 +0100 | [diff] [blame] | 4455 | bfq_check_ioprio_change(bic, bio); |
| 4456 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4457 | bfq_bic_update_cgroup(bic, bio); |
| 4458 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4459 | bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, |
| 4460 | &new_queue); |
| 4461 | |
| 4462 | if (likely(!new_queue)) { |
| 4463 | /* If the queue was seeky for too long, break it apart. */ |
| 4464 | if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { |
| 4465 | bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4466 | |
| 4467 | /* Update bic before losing reference to bfqq */ |
| 4468 | if (bfq_bfqq_in_large_burst(bfqq)) |
| 4469 | bic->saved_in_large_burst = true; |
| 4470 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4471 | bfqq = bfq_split_bfqq(bic, bfqq); |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 4472 | split = true; |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4473 | |
| 4474 | if (!bfqq) |
| 4475 | bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, |
| 4476 | true, is_sync, |
| 4477 | NULL); |
Paolo Valente | 13c931b | 2017-06-27 12:30:47 -0600 | [diff] [blame] | 4478 | else |
| 4479 | bfqq_already_existing = true; |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4480 | } |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4481 | } |
| 4482 | |
| 4483 | bfqq->allocated++; |
| 4484 | bfqq->ref++; |
| 4485 | bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", |
| 4486 | rq, bfqq, bfqq->ref); |
| 4487 | |
| 4488 | rq->elv.priv[0] = bic; |
| 4489 | rq->elv.priv[1] = bfqq; |
| 4490 | |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4491 | /* |
| 4492 | * If a bfq_queue has only one process reference, it is owned |
| 4493 | * by only this bic: we can then set bfqq->bic = bic. In |
| 4494 | * addition, if the queue has also just been split, we have to |
| 4495 | * resume its state. |
| 4496 | */ |
| 4497 | if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { |
| 4498 | bfqq->bic = bic; |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 4499 | if (split) { |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4500 | /* |
| 4501 | * The queue has just been split from a shared |
| 4502 | * queue: restore the idle window and the |
| 4503 | * possible weight raising period. |
| 4504 | */ |
Paolo Valente | 13c931b | 2017-06-27 12:30:47 -0600 | [diff] [blame] | 4505 | bfq_bfqq_resume_state(bfqq, bfqd, bic, |
| 4506 | bfqq_already_existing); |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4507 | } |
| 4508 | } |
| 4509 | |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4510 | if (unlikely(bfq_bfqq_just_created(bfqq))) |
| 4511 | bfq_handle_burst(bfqd, bfqq); |
| 4512 | |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 4513 | spin_unlock_irq(&bfqd->lock); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4514 | } |
| 4515 | |
| 4516 | static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) |
| 4517 | { |
| 4518 | struct bfq_data *bfqd = bfqq->bfqd; |
| 4519 | enum bfqq_expiration reason; |
| 4520 | unsigned long flags; |
| 4521 | |
| 4522 | spin_lock_irqsave(&bfqd->lock, flags); |
| 4523 | bfq_clear_bfqq_wait_request(bfqq); |
| 4524 | |
| 4525 | if (bfqq != bfqd->in_service_queue) { |
| 4526 | spin_unlock_irqrestore(&bfqd->lock, flags); |
| 4527 | return; |
| 4528 | } |
| 4529 | |
| 4530 | if (bfq_bfqq_budget_timeout(bfqq)) |
| 4531 | /* |
| 4532 | * Also here the queue can be safely expired |
| 4533 | * for budget timeout without wasting |
| 4534 | * guarantees |
| 4535 | */ |
| 4536 | reason = BFQQE_BUDGET_TIMEOUT; |
| 4537 | else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) |
| 4538 | /* |
| 4539 | * The queue may not be empty upon timer expiration, |
| 4540 | * because we may not disable the timer when the |
| 4541 | * first request of the in-service queue arrives |
| 4542 | * during disk idling. |
| 4543 | */ |
| 4544 | reason = BFQQE_TOO_IDLE; |
| 4545 | else |
| 4546 | goto schedule_dispatch; |
| 4547 | |
| 4548 | bfq_bfqq_expire(bfqd, bfqq, true, reason); |
| 4549 | |
| 4550 | schedule_dispatch: |
Paolo Valente | 6fa3e8d | 2017-04-12 18:23:21 +0200 | [diff] [blame] | 4551 | spin_unlock_irqrestore(&bfqd->lock, flags); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4552 | bfq_schedule_dispatch(bfqd); |
| 4553 | } |
| 4554 | |
| 4555 | /* |
| 4556 | * Handler of the expiration of the timer running if the in-service queue |
| 4557 | * is idling inside its time slice. |
| 4558 | */ |
| 4559 | static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) |
| 4560 | { |
| 4561 | struct bfq_data *bfqd = container_of(timer, struct bfq_data, |
| 4562 | idle_slice_timer); |
| 4563 | struct bfq_queue *bfqq = bfqd->in_service_queue; |
| 4564 | |
| 4565 | /* |
| 4566 | * Theoretical race here: the in-service queue can be NULL or |
| 4567 | * different from the queue that was idling if a new request |
| 4568 | * arrives for the current queue and there is a full dispatch |
| 4569 | * cycle that changes the in-service queue. This can hardly |
| 4570 | * happen, but in the worst case we just expire a queue too |
| 4571 | * early. |
| 4572 | */ |
| 4573 | if (bfqq) |
| 4574 | bfq_idle_slice_timer_body(bfqq); |
| 4575 | |
| 4576 | return HRTIMER_NORESTART; |
| 4577 | } |
| 4578 | |
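/*
 * Drop one reference to the async queue pointed to by *bfqq_ptr, if
 * any, after reparenting the queue to the root group, and clear the
 * pointer.
 */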
| 4579 | static void __bfq_put_async_bfqq(struct bfq_data *bfqd, |
| 4580 | struct bfq_queue **bfqq_ptr) |
| 4581 | { |
| 4582 | struct bfq_queue *bfqq = *bfqq_ptr; |
| 4583 | |
| 4584 | bfq_log(bfqd, "put_async_bfqq: %p", bfqq); |
| 4585 | if (bfqq) { |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4586 | bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); |
| 4587 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4588 | bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", |
| 4589 | bfqq, bfqq->ref); |
| 4590 | bfq_put_queue(bfqq); |
| 4591 | *bfqq_ptr = NULL; |
| 4592 | } |
| 4593 | } |
| 4594 | |
| 4595 | /* |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4596 | * Release all the bfqg's references to its async queues. If we are
| 4597 | * deallocating the group, these queues may still contain requests, so
| 4598 | * we reparent them to the root cgroup (i.e., the only one that will |
| 4599 | * exist for sure until all the requests on a device are gone). |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4600 | */ |
Paolo Valente | ea25da4 | 2017-04-19 08:48:24 -0600 | [diff] [blame] | 4601 | void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4602 | { |
| 4603 | int i, j; |
| 4604 | |
| 4605 | for (i = 0; i < 2; i++) |
| 4606 | for (j = 0; j < IOPRIO_BE_NR; j++) |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4607 | __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4608 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4609 | __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4610 | } |
| 4611 | |
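/*
 * Tear down the scheduler instance: cancel the idle-slice timer,
 * deactivate the queues still on the idle list, release the root group
 * (directly, or through blkcg policy deactivation when group
 * scheduling is enabled), and finally free the scheduler data.
 */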
| 4612 | static void bfq_exit_queue(struct elevator_queue *e) |
| 4613 | { |
| 4614 | struct bfq_data *bfqd = e->elevator_data; |
| 4615 | struct bfq_queue *bfqq, *n; |
| 4616 | |
| 4617 | hrtimer_cancel(&bfqd->idle_slice_timer); |
| 4618 | |
| 4619 | spin_lock_irq(&bfqd->lock); |
| 4620 | list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4621 | bfq_deactivate_bfqq(bfqd, bfqq, false, false); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4622 | spin_unlock_irq(&bfqd->lock); |
| 4623 | |
| 4624 | hrtimer_cancel(&bfqd->idle_slice_timer); |
| 4625 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4626 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 4627 | blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); |
| 4628 | #else |
| 4629 | spin_lock_irq(&bfqd->lock); |
| 4630 | bfq_put_async_queues(bfqd, bfqd->root_group); |
| 4631 | kfree(bfqd->root_group); |
| 4632 | spin_unlock_irq(&bfqd->lock); |
| 4633 | #endif |
| 4634 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4635 | kfree(bfqd); |
| 4636 | } |
| 4637 | |
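/*
 * Set up the root group: detach its entity from any parent, reset the
 * request-position tree and the per-ioprio-class service trees, and
 * record the current time as the last service time of the idle class.
 */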
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4638 | static void bfq_init_root_group(struct bfq_group *root_group, |
| 4639 | struct bfq_data *bfqd) |
| 4640 | { |
| 4641 | int i; |
| 4642 | |
| 4643 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 4644 | root_group->entity.parent = NULL; |
| 4645 | root_group->my_entity = NULL; |
| 4646 | root_group->bfqd = bfqd; |
| 4647 | #endif |
Arianna Avanzini | 36eca89 | 2017-04-12 18:23:16 +0200 | [diff] [blame] | 4648 | root_group->rq_pos_tree = RB_ROOT; |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4649 | for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) |
| 4650 | root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; |
| 4651 | root_group->sched_data.bfq_class_idle_last_service = jiffies; |
| 4652 | } |
| 4653 | |
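/*
 * Instantiate the scheduler for a request queue: allocate the
 * scheduler data, set up the permanent oom_bfqq fallback queue, the
 * idle-slice timer and the default values of all tunables, and create
 * the root group of the cgroup hierarchy.
 */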
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4654 | static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) |
| 4655 | { |
| 4656 | struct bfq_data *bfqd; |
| 4657 | struct elevator_queue *eq; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4658 | |
| 4659 | eq = elevator_alloc(q, e); |
| 4660 | if (!eq) |
| 4661 | return -ENOMEM; |
| 4662 | |
| 4663 | bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); |
| 4664 | if (!bfqd) { |
| 4665 | kobject_put(&eq->kobj); |
| 4666 | return -ENOMEM; |
| 4667 | } |
| 4668 | eq->elevator_data = bfqd; |
| 4669 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4670 | spin_lock_irq(q->queue_lock); |
| 4671 | q->elevator = eq; |
| 4672 | spin_unlock_irq(q->queue_lock); |
| 4673 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4674 | /* |
| 4675 | * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
| 4676 | * Grab a permanent reference to it, so that the normal code flow |
| 4677 | * will not attempt to free it. |
| 4678 | */ |
| 4679 | bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); |
| 4680 | bfqd->oom_bfqq.ref++; |
| 4681 | bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; |
| 4682 | bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; |
| 4683 | bfqd->oom_bfqq.entity.new_weight = |
| 4684 | bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4685 | |
| 4686 | /* oom_bfqq does not participate in bursts */
| 4687 | bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); |
| 4688 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4689 | /* |
| 4690 | * Trigger weight initialization, according to ioprio, at the |
| 4691 | * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio |
| 4692 | * class won't be changed any more. |
| 4693 | */ |
| 4694 | bfqd->oom_bfqq.entity.prio_changed = 1; |
| 4695 | |
| 4696 | bfqd->queue = q; |
| 4697 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4698 | INIT_LIST_HEAD(&bfqd->dispatch); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4699 | |
| 4700 | hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, |
| 4701 | HRTIMER_MODE_REL); |
| 4702 | bfqd->idle_slice_timer.function = bfq_idle_slice_timer; |
| 4703 | |
Arianna Avanzini | 1de0c4c | 2017-04-12 18:23:17 +0200 | [diff] [blame] | 4704 | bfqd->queue_weights_tree = RB_ROOT; |
| 4705 | bfqd->group_weights_tree = RB_ROOT; |
| 4706 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4707 | INIT_LIST_HEAD(&bfqd->active_list); |
| 4708 | INIT_LIST_HEAD(&bfqd->idle_list); |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4709 | INIT_HLIST_HEAD(&bfqd->burst_list); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4710 | |
| 4711 | bfqd->hw_tag = -1; |
| 4712 | |
| 4713 | bfqd->bfq_max_budget = bfq_default_max_budget; |
| 4714 | |
| 4715 | bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; |
| 4716 | bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; |
| 4717 | bfqd->bfq_back_max = bfq_back_max; |
| 4718 | bfqd->bfq_back_penalty = bfq_back_penalty; |
| 4719 | bfqd->bfq_slice_idle = bfq_slice_idle; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4720 | bfqd->bfq_timeout = bfq_timeout; |
| 4721 | |
| 4722 | bfqd->bfq_requests_within_timer = 120; |
| 4723 | |
Arianna Avanzini | e1b2324 | 2017-04-12 18:23:20 +0200 | [diff] [blame] | 4724 | bfqd->bfq_large_burst_thresh = 8; |
| 4725 | bfqd->bfq_burst_interval = msecs_to_jiffies(180); |
| 4726 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4727 | bfqd->low_latency = true; |
| 4728 | |
| 4729 | /* |
| 4730 | * Trade-off between responsiveness and fairness. |
| 4731 | */ |
| 4732 | bfqd->bfq_wr_coeff = 30; |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 4733 | bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4734 | bfqd->bfq_wr_max_time = 0; |
| 4735 | bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); |
| 4736 | bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); |
Paolo Valente | 77b7dce | 2017-04-12 18:23:13 +0200 | [diff] [blame] | 4737 | bfqd->bfq_wr_max_softrt_rate = 7000; /* |
| 4738 | * Approximate rate required |
| 4739 | * to play back or record a
| 4740 | * high-definition compressed |
| 4741 | * video. |
| 4742 | */ |
Paolo Valente | cfd6971 | 2017-04-12 18:23:15 +0200 | [diff] [blame] | 4743 | bfqd->wr_busy_queues = 0; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4744 | |
| 4745 | /* |
| 4746 | * Begin by assuming, optimistically, that the device is a |
| 4747 | * high-speed one, and that its peak rate is equal to 2/3 of |
| 4748 | * the highest reference rate. |
| 4749 | */ |
| 4750 | bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] * |
| 4751 | T_fast[blk_queue_nonrot(bfqd->queue)]; |
| 4752 | bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3; |
| 4753 | bfqd->device_speed = BFQ_BFQD_FAST; |
| 4754 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4755 | spin_lock_init(&bfqd->lock); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4756 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4757 | /* |
| 4758 | * The invocation of the next bfq_create_group_hierarchy |
| 4759 | * function is the head of a chain of function calls |
| 4760 | * (bfq_create_group_hierarchy->blkcg_activate_policy-> |
| 4761 | * blk_mq_freeze_queue) that may lead to the invocation of the |
| 4762 | * has_work hook function. For this reason, |
| 4763 | * bfq_create_group_hierarchy is invoked only after all |
| 4764 | * scheduler data has been initialized, apart from the fields |
| 4765 | * that can be initialized only after invoking |
| 4766 | * bfq_create_group_hierarchy. This, in particular, enables |
| 4767 | * has_work to correctly return false. Of course, to avoid |
| 4768 | * other inconsistencies, the blk-mq stack must then refrain |
| 4769 | * from invoking further scheduler hooks before this init |
| 4770 | * function is finished. |
| 4771 | */ |
| 4772 | bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); |
| 4773 | if (!bfqd->root_group) |
| 4774 | goto out_free; |
| 4775 | bfq_init_root_group(bfqd->root_group, bfqd); |
| 4776 | bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); |
| 4777 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4778 | |
| 4779 | return 0; |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 4780 | |
| 4781 | out_free: |
| 4782 | kfree(bfqd); |
| 4783 | kobject_put(&eq->kobj); |
| 4784 | return -ENOMEM; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4785 | } |
| 4786 | |
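/* The slab cache backing bfq_queue allocations (bfq_pool). */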
| 4787 | static void bfq_slab_kill(void) |
| 4788 | { |
| 4789 | kmem_cache_destroy(bfq_pool); |
| 4790 | } |
| 4791 | |
| 4792 | static int __init bfq_slab_setup(void) |
| 4793 | { |
| 4794 | bfq_pool = KMEM_CACHE(bfq_queue, 0); |
| 4795 | if (!bfq_pool) |
| 4796 | return -ENOMEM; |
| 4797 | return 0; |
| 4798 | } |
| 4799 | |
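/* Helpers to print and parse the sysfs tunables defined below. */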
| 4800 | static ssize_t bfq_var_show(unsigned int var, char *page) |
| 4801 | { |
| 4802 | return sprintf(page, "%u\n", var); |
| 4803 | } |
| 4804 | |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4805 | static int bfq_var_store(unsigned long *var, const char *page) |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4806 | { |
| 4807 | unsigned long new_val; |
| 4808 | int ret = kstrtoul(page, 10, &new_val); |
| 4809 | |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4810 | if (ret) |
| 4811 | return ret; |
| 4812 | *var = new_val; |
| 4813 | return 0; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4814 | } |
| 4815 | |
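/*
 * In the show macros below, __CONV selects the unit conversion applied
 * to the value being exposed: 0 = none, 1 = the field is in jiffies
 * and is shown in milliseconds, 2 = the field is in nanoseconds and is
 * shown in milliseconds.
 */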
| 4816 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
| 4817 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
| 4818 | { \ |
| 4819 | struct bfq_data *bfqd = e->elevator_data; \ |
| 4820 | u64 __data = __VAR; \ |
| 4821 | if (__CONV == 1) \ |
| 4822 | __data = jiffies_to_msecs(__data); \ |
| 4823 | else if (__CONV == 2) \ |
| 4824 | __data = div_u64(__data, NSEC_PER_MSEC); \ |
| 4825 | return bfq_var_show(__data, (page)); \ |
| 4826 | } |
| 4827 | SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); |
| 4828 | SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); |
| 4829 | SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); |
| 4830 | SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); |
| 4831 | SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); |
| 4832 | SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); |
| 4833 | SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); |
| 4834 | SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4835 | SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4836 | #undef SHOW_FUNCTION |
| 4837 | |
| 4838 | #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ |
| 4839 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
| 4840 | { \ |
| 4841 | struct bfq_data *bfqd = e->elevator_data; \ |
| 4842 | u64 __data = __VAR; \ |
| 4843 | __data = div_u64(__data, NSEC_PER_USEC); \ |
| 4844 | return bfq_var_show(__data, (page)); \ |
| 4845 | } |
| 4846 | USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); |
| 4847 | #undef USEC_SHOW_FUNCTION |
| 4848 | |
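/*
 * The store macros below clamp the user-provided value to [MIN, MAX]
 * before converting it according to __CONV (same encoding as for the
 * show macros) and writing it to the target field.
 */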
| 4849 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
| 4850 | static ssize_t \ |
| 4851 | __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
| 4852 | { \ |
| 4853 | struct bfq_data *bfqd = e->elevator_data; \ |
Bart Van Assche | 1530486c | 2017-08-30 11:42:10 -0700 | [diff] [blame^] | 4854 | unsigned long __data, __min = (MIN), __max = (MAX); \ |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4855 | int ret; \ |
| 4856 | \ |
| 4857 | ret = bfq_var_store(&__data, (page)); \ |
| 4858 | if (ret) \ |
| 4859 | return ret; \ |
Bart Van Assche | 1530486c | 2017-08-30 11:42:10 -0700 | [diff] [blame^] | 4860 | if (__data < __min) \ |
| 4861 | __data = __min; \ |
| 4862 | else if (__data > __max) \ |
| 4863 | __data = __max; \ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4864 | if (__CONV == 1) \ |
| 4865 | *(__PTR) = msecs_to_jiffies(__data); \ |
| 4866 | else if (__CONV == 2) \ |
| 4867 | *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ |
| 4868 | else \ |
| 4869 | *(__PTR) = __data; \ |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4870 | return count; \ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4871 | } |
| 4872 | STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, |
| 4873 | INT_MAX, 2); |
| 4874 | STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, |
| 4875 | INT_MAX, 2); |
| 4876 | STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); |
| 4877 | STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, |
| 4878 | INT_MAX, 0); |
| 4879 | STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); |
| 4880 | #undef STORE_FUNCTION |
| 4881 | |
| 4882 | #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ |
| 4883 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ |
| 4884 | { \ |
| 4885 | struct bfq_data *bfqd = e->elevator_data; \ |
Bart Van Assche | 1530486c | 2017-08-30 11:42:10 -0700 | [diff] [blame^] | 4886 | unsigned long __data, __min = (MIN), __max = (MAX); \ |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4887 | int ret; \ |
| 4888 | \ |
| 4889 | ret = bfq_var_store(&__data, (page)); \ |
| 4890 | if (ret) \ |
| 4891 | return ret; \ |
Bart Van Assche | 1530486c | 2017-08-30 11:42:10 -0700 | [diff] [blame^] | 4892 | if (__data < __min) \ |
| 4893 | __data = __min; \ |
| 4894 | else if (__data > __max) \ |
| 4895 | __data = __max; \ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4896 | *(__PTR) = (u64)__data * NSEC_PER_USEC; \ |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4897 | return count; \ |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4898 | } |
| 4899 | USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, |
| 4900 | UINT_MAX); |
| 4901 | #undef USEC_STORE_FUNCTION |
| 4902 | |
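/*
 * Writing 0 re-enables the automatic computation of the maximum
 * budget; any other value (capped to INT_MAX) is used as a fixed
 * maximum budget.
 */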
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4903 | static ssize_t bfq_max_budget_store(struct elevator_queue *e, |
| 4904 | const char *page, size_t count) |
| 4905 | { |
| 4906 | struct bfq_data *bfqd = e->elevator_data; |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4907 | unsigned long __data; |
| 4908 | int ret; |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4909 | |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4910 | ret = bfq_var_store(&__data, (page)); |
| 4911 | if (ret) |
| 4912 | return ret; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4913 | |
| 4914 | if (__data == 0) |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 4915 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4916 | else { |
| 4917 | if (__data > INT_MAX) |
| 4918 | __data = INT_MAX; |
| 4919 | bfqd->bfq_max_budget = __data; |
| 4920 | } |
| 4921 | |
| 4922 | bfqd->bfq_user_max_budget = __data; |
| 4923 | |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4924 | return count; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4925 | } |
| 4926 | |
| 4927 | /* |
| 4928 | * The name is kept for compatibility with the corresponding cfq
| 4929 | * parameter, but this timeout is used for both sync and async requests.
| 4930 | */ |
| 4931 | static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, |
| 4932 | const char *page, size_t count) |
| 4933 | { |
| 4934 | struct bfq_data *bfqd = e->elevator_data; |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4935 | unsigned long __data; |
| 4936 | int ret; |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4937 | |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4938 | ret = bfq_var_store(&__data, (page)); |
| 4939 | if (ret) |
| 4940 | return ret; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4941 | |
| 4942 | if (__data < 1) |
| 4943 | __data = 1; |
| 4944 | else if (__data > INT_MAX) |
| 4945 | __data = INT_MAX; |
| 4946 | |
| 4947 | bfqd->bfq_timeout = msecs_to_jiffies(__data); |
| 4948 | if (bfqd->bfq_user_max_budget == 0) |
Paolo Valente | ab0e43e | 2017-04-12 18:23:10 +0200 | [diff] [blame] | 4949 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4950 | |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4951 | return count; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4952 | } |
| 4953 | |
| 4954 | static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, |
| 4955 | const char *page, size_t count) |
| 4956 | { |
| 4957 | struct bfq_data *bfqd = e->elevator_data; |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4958 | unsigned long __data; |
| 4959 | int ret; |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4960 | |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4961 | ret = bfq_var_store(&__data, (page)); |
| 4962 | if (ret) |
| 4963 | return ret; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4964 | |
| 4965 | if (__data > 1) |
| 4966 | __data = 1; |
| 4967 | if (!bfqd->strict_guarantees && __data == 1 |
| 4968 | && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) |
| 4969 | bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; |
| 4970 | |
| 4971 | bfqd->strict_guarantees = __data; |
| 4972 | |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4973 | return count; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4974 | } |
| 4975 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4976 | static ssize_t bfq_low_latency_store(struct elevator_queue *e, |
| 4977 | const char *page, size_t count) |
| 4978 | { |
| 4979 | struct bfq_data *bfqd = e->elevator_data; |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4980 | unsigned long __data; |
| 4981 | int ret; |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4982 | |
Bart Van Assche | 2f79136 | 2017-08-30 11:42:09 -0700 | [diff] [blame] | 4983 | ret = bfq_var_store(&__data, (page)); |
| 4984 | if (ret) |
| 4985 | return ret; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4986 | |
| 4987 | if (__data > 1) |
| 4988 | __data = 1; |
| 4989 | if (__data == 0 && bfqd->low_latency != 0) |
| 4990 | bfq_end_wr(bfqd); |
| 4991 | bfqd->low_latency = __data; |
| 4992 | |
weiping zhang | 235f8da | 2017-08-25 01:11:33 +0800 | [diff] [blame] | 4993 | return count; |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 4994 | } |
| 4995 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 4996 | #define BFQ_ATTR(name) \ |
| 4997 | __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store) |
| 4998 | |
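/*
 * Tunables exported through sysfs; with blk-mq elevators they are
 * expected to appear under /sys/block/<device>/queue/iosched/, e.g.
 * (assuming a device named sda, given only as an illustration):
 *   echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */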
| 4999 | static struct elv_fs_entry bfq_attrs[] = { |
| 5000 | BFQ_ATTR(fifo_expire_sync), |
| 5001 | BFQ_ATTR(fifo_expire_async), |
| 5002 | BFQ_ATTR(back_seek_max), |
| 5003 | BFQ_ATTR(back_seek_penalty), |
| 5004 | BFQ_ATTR(slice_idle), |
| 5005 | BFQ_ATTR(slice_idle_us), |
| 5006 | BFQ_ATTR(max_budget), |
| 5007 | BFQ_ATTR(timeout_sync), |
| 5008 | BFQ_ATTR(strict_guarantees), |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 5009 | BFQ_ATTR(low_latency), |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5010 | __ATTR_NULL |
| 5011 | }; |
| 5012 | |
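/*
 * Registration data for the blk-mq elevator framework: the hook table,
 * the size and alignment of the bfq_io_cq structure holding per-process
 * state, and the sysfs attributes defined above.
 */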
| 5013 | static struct elevator_type iosched_bfq_mq = { |
| 5014 | .ops.mq = { |
Christoph Hellwig | 5bbf4e5 | 2017-06-16 18:15:26 +0200 | [diff] [blame] | 5015 | .prepare_request = bfq_prepare_request, |
Christoph Hellwig | 7b9e936 | 2017-06-16 18:15:21 +0200 | [diff] [blame] | 5016 | .finish_request = bfq_finish_request, |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5017 | .exit_icq = bfq_exit_icq, |
| 5018 | .insert_requests = bfq_insert_requests, |
| 5019 | .dispatch_request = bfq_dispatch_request, |
| 5020 | .next_request = elv_rb_latter_request, |
| 5021 | .former_request = elv_rb_former_request, |
| 5022 | .allow_merge = bfq_allow_bio_merge, |
| 5023 | .bio_merge = bfq_bio_merge, |
| 5024 | .request_merge = bfq_request_merge, |
| 5025 | .requests_merged = bfq_requests_merged, |
| 5026 | .request_merged = bfq_request_merged, |
| 5027 | .has_work = bfq_has_work, |
| 5028 | .init_sched = bfq_init_queue, |
| 5029 | .exit_sched = bfq_exit_queue, |
| 5030 | }, |
| 5031 | |
| 5032 | .uses_mq = true, |
| 5033 | .icq_size = sizeof(struct bfq_io_cq), |
| 5034 | .icq_align = __alignof__(struct bfq_io_cq), |
| 5035 | .elevator_attrs = bfq_attrs, |
| 5036 | .elevator_name = "bfq", |
| 5037 | .elevator_owner = THIS_MODULE, |
| 5038 | }; |
| 5039 | |
| 5040 | static int __init bfq_init(void) |
| 5041 | { |
| 5042 | int ret; |
| 5043 | |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 5044 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 5045 | ret = blkcg_policy_register(&blkcg_policy_bfq); |
| 5046 | if (ret) |
| 5047 | return ret; |
| 5048 | #endif |
| 5049 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5050 | ret = -ENOMEM; |
| 5051 | if (bfq_slab_setup()) |
| 5052 | goto err_pol_unreg; |
| 5053 | |
Paolo Valente | 44e44a1 | 2017-04-12 18:23:12 +0200 | [diff] [blame] | 5054 | /* |
| 5055 | * Times to load large popular applications for the typical |
| 5056 | * systems installed on the reference devices (see the |
| 5057 | * comments before the definitions of the next two |
| 5058 | * arrays). Actually, we use slightly slower values, as the |
| 5059 | * estimated peak rate tends to be smaller than the actual |
| 5060 | * peak rate. The reason for this last fact is that estimates |
| 5061 | * are computed over much shorter time intervals than the long |
| 5062 | * intervals typically used for benchmarking. Why? First, to |
| 5063 | * adapt more quickly to variations. Second, because an I/O |
| 5064 | * scheduler cannot rely on a peak-rate-evaluation workload to |
| 5065 | * be run for a long time. |
| 5066 | */ |
| 5067 | T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */ |
| 5068 | T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */ |
| 5069 | T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */ |
| 5070 | T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */ |
| 5071 | |
| 5072 | /* |
| 5073 | * Thresholds that determine the switch between speed classes |
| 5074 | * (see the comments before the definition of the array |
| 5075 | * device_speed_thresh). These thresholds are biased towards |
| 5076 | * transitions to the fast class. This is safer than the |
| 5077 | * opposite bias. In fact, a wrong transition to the slow |
| 5078 | * class results in short weight-raising periods, because the |
| 5079 | * speed of the device then tends to be higher than the
| 5080 | * reference peak rate. On the opposite end, a wrong |
| 5081 | * transition to the fast class tends to increase |
| 5082 | * weight-raising periods, for the opposite reason.
| 5083 | */ |
| 5084 | device_speed_thresh[0] = (4 * R_slow[0]) / 3; |
| 5085 | device_speed_thresh[1] = (4 * R_slow[1]) / 3; |
| 5086 | |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5087 | ret = elv_register(&iosched_bfq_mq); |
| 5088 | if (ret) |
weiping zhang | 37dcd65 | 2017-08-19 00:37:20 +0800 | [diff] [blame] | 5089 | goto slab_kill; |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5090 | |
| 5091 | return 0; |
| 5092 | |
weiping zhang | 37dcd65 | 2017-08-19 00:37:20 +0800 | [diff] [blame] | 5093 | slab_kill: |
| 5094 | bfq_slab_kill(); |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5095 | err_pol_unreg: |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 5096 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 5097 | blkcg_policy_unregister(&blkcg_policy_bfq); |
| 5098 | #endif |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5099 | return ret; |
| 5100 | } |
| 5101 | |
| 5102 | static void __exit bfq_exit(void) |
| 5103 | { |
| 5104 | elv_unregister(&iosched_bfq_mq); |
Arianna Avanzini | e21b7a0 | 2017-04-12 18:23:08 +0200 | [diff] [blame] | 5105 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
| 5106 | blkcg_policy_unregister(&blkcg_policy_bfq); |
| 5107 | #endif |
Paolo Valente | aee69d7 | 2017-04-19 08:29:02 -0600 | [diff] [blame] | 5108 | bfq_slab_kill(); |
| 5109 | } |
| 5110 | |
| 5111 | module_init(bfq_init); |
| 5112 | module_exit(bfq_exit); |
| 5113 | |
| 5114 | MODULE_AUTHOR("Paolo Valente"); |
| 5115 | MODULE_LICENSE("GPL"); |
| 5116 | MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler"); |